diff --git a/cassandra-3.9-airline0.7.patch b/cassandra-3.9-airline0.7.patch index ef9005a..1af0297 100644 --- a/cassandra-3.9-airline0.7.patch +++ b/cassandra-3.9-airline0.7.patch @@ -1,6 +1,118 @@ -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Assassinate.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Assassinate.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Assassinate.java 2016-10-17 13:11:26.286759742 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Assassinate.java 2016-10-17 10:58:46.171126948 +0200 +From e24c2aa406fdd2407ce804709ef93933e5f1a888 Mon Sep 17 00:00:00 2001 +From: Tomas Repik +Date: Mon, 27 Mar 2017 08:31:14 +0200 +Subject: [PATCH] remove thrift files from airline patch + +--- + src/java/org/apache/cassandra/tools/NodeTool.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/Assassinate.java | 4 ++-- + src/java/org/apache/cassandra/tools/nodetool/BootstrapResume.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/CfHistograms.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/CfStats.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/Cleanup.java | 6 +++--- + src/java/org/apache/cassandra/tools/nodetool/ClearSnapshot.java | 6 +++--- + src/java/org/apache/cassandra/tools/nodetool/Compact.java | 6 +++--- + .../org/apache/cassandra/tools/nodetool/CompactionHistory.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/CompactionStats.java | 4 ++-- + src/java/org/apache/cassandra/tools/nodetool/Decommission.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/DescribeCluster.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/DescribeRing.java | 4 ++-- + .../apache/cassandra/tools/nodetool/DisableAutoCompaction.java | 4 ++-- + src/java/org/apache/cassandra/tools/nodetool/DisableBackup.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/DisableBinary.java | 2 +- + 
src/java/org/apache/cassandra/tools/nodetool/DisableGossip.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/DisableHandoff.java | 2 +- + .../org/apache/cassandra/tools/nodetool/DisableHintsForDC.java | 4 ++-- + src/java/org/apache/cassandra/tools/nodetool/Drain.java | 2 +- + .../org/apache/cassandra/tools/nodetool/EnableAutoCompaction.java | 4 ++-- + src/java/org/apache/cassandra/tools/nodetool/EnableBackup.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/EnableBinary.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/EnableGossip.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/EnableHandoff.java | 2 +- + .../org/apache/cassandra/tools/nodetool/EnableHintsForDC.java | 4 ++-- + .../org/apache/cassandra/tools/nodetool/FailureDetectorInfo.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/Flush.java | 4 ++-- + src/java/org/apache/cassandra/tools/nodetool/GcStats.java | 2 +- + .../apache/cassandra/tools/nodetool/GetCompactionThreshold.java | 4 ++-- + .../apache/cassandra/tools/nodetool/GetCompactionThroughput.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/GetEndpoints.java | 4 ++-- + .../cassandra/tools/nodetool/GetInterDCStreamThroughput.java | 2 +- + .../org/apache/cassandra/tools/nodetool/GetLoggingLevels.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/GetSSTables.java | 8 ++++---- + .../org/apache/cassandra/tools/nodetool/GetStreamThroughput.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/GetTimeout.java | 4 ++-- + .../org/apache/cassandra/tools/nodetool/GetTraceProbability.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/GossipInfo.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/Info.java | 4 ++-- + .../apache/cassandra/tools/nodetool/InvalidateCounterCache.java | 2 +- + .../org/apache/cassandra/tools/nodetool/InvalidateKeyCache.java | 2 +- + .../org/apache/cassandra/tools/nodetool/InvalidateRowCache.java | 2 +- + 
src/java/org/apache/cassandra/tools/nodetool/Join.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/ListSnapshots.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/Move.java | 4 ++-- + src/java/org/apache/cassandra/tools/nodetool/NetStats.java | 4 ++-- + src/java/org/apache/cassandra/tools/nodetool/PauseHandoff.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/ProxyHistograms.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/RangeKeySample.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/Rebuild.java | 8 ++++---- + src/java/org/apache/cassandra/tools/nodetool/RebuildIndex.java | 4 ++-- + src/java/org/apache/cassandra/tools/nodetool/Refresh.java | 4 ++-- + .../org/apache/cassandra/tools/nodetool/RefreshSizeEstimates.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/ReloadTriggers.java | 2 +- + .../org/apache/cassandra/tools/nodetool/RelocateSSTables.java | 6 +++--- + src/java/org/apache/cassandra/tools/nodetool/RemoveNode.java | 4 ++-- + src/java/org/apache/cassandra/tools/nodetool/Repair.java | 6 +++--- + src/java/org/apache/cassandra/tools/nodetool/ReplayBatchlog.java | 2 +- + .../org/apache/cassandra/tools/nodetool/ResetLocalSchema.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/ResumeHandoff.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/Ring.java | 6 +++--- + src/java/org/apache/cassandra/tools/nodetool/Scrub.java | 6 +++--- + .../org/apache/cassandra/tools/nodetool/SetCacheCapacity.java | 4 ++-- + .../org/apache/cassandra/tools/nodetool/SetCacheKeysToSave.java | 4 ++-- + .../apache/cassandra/tools/nodetool/SetCompactionThreshold.java | 4 ++-- + .../apache/cassandra/tools/nodetool/SetCompactionThroughput.java | 4 ++-- + .../cassandra/tools/nodetool/SetHintedHandoffThrottleInKB.java | 4 ++-- + .../cassandra/tools/nodetool/SetInterDCStreamThroughput.java | 4 ++-- + src/java/org/apache/cassandra/tools/nodetool/SetLoggingLevel.java | 4 ++-- + 
.../org/apache/cassandra/tools/nodetool/SetStreamThroughput.java | 4 ++-- + src/java/org/apache/cassandra/tools/nodetool/SetTimeout.java | 4 ++-- + .../org/apache/cassandra/tools/nodetool/SetTraceProbability.java | 4 ++-- + src/java/org/apache/cassandra/tools/nodetool/Snapshot.java | 6 +++--- + src/java/org/apache/cassandra/tools/nodetool/Status.java | 6 +++--- + src/java/org/apache/cassandra/tools/nodetool/StatusBackup.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/StatusBinary.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/StatusGossip.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/StatusHandoff.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/Stop.java | 6 +++--- + src/java/org/apache/cassandra/tools/nodetool/StopDaemon.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/TableHistograms.java | 4 ++-- + src/java/org/apache/cassandra/tools/nodetool/TableStats.java | 6 +++--- + src/java/org/apache/cassandra/tools/nodetool/TopPartitions.java | 6 +++--- + src/java/org/apache/cassandra/tools/nodetool/TpStats.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/TruncateHints.java | 4 ++-- + src/java/org/apache/cassandra/tools/nodetool/UpgradeSSTable.java | 6 +++--- + src/java/org/apache/cassandra/tools/nodetool/Verify.java | 6 +++--- + src/java/org/apache/cassandra/tools/nodetool/Version.java | 2 +- + src/java/org/apache/cassandra/tools/nodetool/ViewBuildStatus.java | 4 ++-- + 90 files changed, 154 insertions(+), 154 deletions(-) + +diff --git a/src/java/org/apache/cassandra/tools/NodeTool.java b/src/java/org/apache/cassandra/tools/NodeTool.java +index c264c31..e83828a 100644 +--- a/src/java/org/apache/cassandra/tools/NodeTool.java ++++ b/src/java/org/apache/cassandra/tools/NodeTool.java +@@ -28,7 +28,7 @@ import com.google.common.base.Joiner; + import com.google.common.base.Throwables; + import com.google.common.collect.*; + +-import io.airlift.command.*; ++import io.airlift.airline.*; + + import 
org.apache.cassandra.locator.EndpointSnitchInfoMBean; + import org.apache.cassandra.tools.nodetool.*; +diff --git a/src/java/org/apache/cassandra/tools/nodetool/Assassinate.java b/src/java/org/apache/cassandra/tools/nodetool/Assassinate.java +index 56fec44..a075ded 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/Assassinate.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/Assassinate.java @@ -18,8 +18,8 @@ package org.apache.cassandra.tools.nodetool; @@ -12,9 +124,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/As import java.net.UnknownHostException; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/BootstrapResume.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/BootstrapResume.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/BootstrapResume.java 2016-10-17 13:11:26.289759772 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/BootstrapResume.java 2016-10-17 10:58:46.171126948 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/BootstrapResume.java b/src/java/org/apache/cassandra/tools/nodetool/BootstrapResume.java +index bb47e10..7be9173 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/BootstrapResume.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/BootstrapResume.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -24,9 +137,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Bo import java.io.IOError; import java.io.IOException; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/CfHistograms.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/CfHistograms.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/CfHistograms.java 2016-10-17 13:11:26.289759772 +0200 -+++ 
cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/CfHistograms.java 2016-10-17 10:58:46.171126948 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/CfHistograms.java b/src/java/org/apache/cassandra/tools/nodetool/CfHistograms.java +index 69d3b4a..8fdf803 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/CfHistograms.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/CfHistograms.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -36,9 +150,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Cf /** * @deprecated use TableHistograms -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/CfStats.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/CfStats.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/CfStats.java 2016-10-17 13:11:26.289759772 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/CfStats.java 2016-10-17 10:58:46.171126948 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/CfStats.java b/src/java/org/apache/cassandra/tools/nodetool/CfStats.java +index 15c72ba..2d27ea0 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/CfStats.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/CfStats.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -48,9 +163,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Cf /** * @deprecated use TableStats -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Cleanup.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Cleanup.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Cleanup.java 2016-10-17 13:11:26.289759772 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Cleanup.java 2016-10-17 10:58:46.171126948 +0200 +diff 
--git a/src/java/org/apache/cassandra/tools/nodetool/Cleanup.java b/src/java/org/apache/cassandra/tools/nodetool/Cleanup.java +index 41e9b01..3e97f03 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/Cleanup.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/Cleanup.java @@ -17,14 +17,14 @@ */ package org.apache.cassandra.tools.nodetool; @@ -69,10 +185,11 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Cl import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/ClearSnapshot.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/ClearSnapshot.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/ClearSnapshot.java 2016-10-17 13:11:26.289759772 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/ClearSnapshot.java 2016-10-17 10:58:46.171126948 +0200 -@@ -20,9 +20,9 @@ +diff --git a/src/java/org/apache/cassandra/tools/nodetool/ClearSnapshot.java b/src/java/org/apache/cassandra/tools/nodetool/ClearSnapshot.java +index 7167bd9..10a8117 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/ClearSnapshot.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/ClearSnapshot.java +@@ -20,9 +20,9 @@ package org.apache.cassandra.tools.nodetool; import static com.google.common.collect.Iterables.toArray; import static org.apache.commons.lang3.StringUtils.EMPTY; import static org.apache.commons.lang3.StringUtils.join; @@ -85,10 +202,28 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Cl import java.io.IOException; import java.util.ArrayList; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/CompactionHistory.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/CompactionHistory.java ---- 
cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/CompactionHistory.java 2016-10-17 13:11:26.290759781 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/CompactionHistory.java 2016-10-17 10:58:46.172126957 +0200 -@@ -26,7 +26,7 @@ +diff --git a/src/java/org/apache/cassandra/tools/nodetool/Compact.java b/src/java/org/apache/cassandra/tools/nodetool/Compact.java +index f268f0a..0bb89e2 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/Compact.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/Compact.java +@@ -17,9 +17,9 @@ + */ + package org.apache.cassandra.tools.nodetool; + +-import io.airlift.command.Arguments; +-import io.airlift.command.Command; +-import io.airlift.command.Option; ++import io.airlift.airline.Arguments; ++import io.airlift.airline.Command; ++import io.airlift.airline.Option; + + import java.util.ArrayList; + import java.util.List; +diff --git a/src/java/org/apache/cassandra/tools/nodetool/CompactionHistory.java b/src/java/org/apache/cassandra/tools/nodetool/CompactionHistory.java +index 40c6887..55032c4 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/CompactionHistory.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/CompactionHistory.java +@@ -26,7 +26,7 @@ import java.util.List; import java.util.Set; import javax.management.openmbean.TabularData; @@ -97,10 +232,11 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Co import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/CompactionStats.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/CompactionStats.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/CompactionStats.java 2016-10-17 13:11:26.290759781 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/CompactionStats.java 
2016-10-17 10:58:46.172126957 +0200 -@@ -22,8 +22,8 @@ +diff --git a/src/java/org/apache/cassandra/tools/nodetool/CompactionStats.java b/src/java/org/apache/cassandra/tools/nodetool/CompactionStats.java +index 69fcbab..908c255 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/CompactionStats.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/CompactionStats.java +@@ -22,8 +22,8 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; @@ -111,25 +247,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Co import org.apache.cassandra.db.compaction.CompactionManagerMBean; import org.apache.cassandra.db.compaction.OperationType; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Compact.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Compact.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Compact.java 2016-10-17 13:11:26.290759781 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Compact.java 2016-10-17 10:58:46.172126957 +0200 -@@ -17,9 +17,9 @@ - */ - package org.apache.cassandra.tools.nodetool; - --import io.airlift.command.Arguments; --import io.airlift.command.Command; --import io.airlift.command.Option; -+import io.airlift.airline.Arguments; -+import io.airlift.airline.Command; -+import io.airlift.airline.Option; - - import java.util.ArrayList; - import java.util.List; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Decommission.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Decommission.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Decommission.java 2016-10-17 13:11:26.290759781 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Decommission.java 2016-10-17 10:58:46.172126957 +0200 +diff --git 
a/src/java/org/apache/cassandra/tools/nodetool/Decommission.java b/src/java/org/apache/cassandra/tools/nodetool/Decommission.java +index 34890e0..f18a246 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/Decommission.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/Decommission.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -139,9 +260,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/De import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/DescribeCluster.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/DescribeCluster.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/DescribeCluster.java 2016-10-17 13:11:26.290759781 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/DescribeCluster.java 2016-10-17 10:58:46.172126957 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/DescribeCluster.java b/src/java/org/apache/cassandra/tools/nodetool/DescribeCluster.java +index 81dee20..9bf3990 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/DescribeCluster.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/DescribeCluster.java @@ -18,7 +18,7 @@ package org.apache.cassandra.tools.nodetool; @@ -151,9 +273,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/De import java.util.List; import java.util.Map; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/DescribeRing.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/DescribeRing.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/DescribeRing.java 2016-10-17 13:11:26.291759791 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/DescribeRing.java 2016-10-17 
10:58:46.172126957 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/DescribeRing.java b/src/java/org/apache/cassandra/tools/nodetool/DescribeRing.java +index a120ffe..2a73c2a 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/DescribeRing.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/DescribeRing.java @@ -18,8 +18,8 @@ package org.apache.cassandra.tools.nodetool; @@ -165,9 +288,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/De import java.io.IOException; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/DisableAutoCompaction.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/DisableAutoCompaction.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/DisableAutoCompaction.java 2016-10-17 13:11:26.291759791 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/DisableAutoCompaction.java 2016-10-17 10:58:46.172126957 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableAutoCompaction.java b/src/java/org/apache/cassandra/tools/nodetool/DisableAutoCompaction.java +index 4d35ded..b9fc7d6 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/DisableAutoCompaction.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/DisableAutoCompaction.java @@ -17,8 +17,8 @@ */ package org.apache.cassandra.tools.nodetool; @@ -179,9 +303,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Di import java.io.IOException; import java.util.ArrayList; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/DisableBackup.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/DisableBackup.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/DisableBackup.java 2016-10-17 13:11:26.291759791 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/DisableBackup.java 
2016-10-17 10:58:46.173126965 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableBackup.java b/src/java/org/apache/cassandra/tools/nodetool/DisableBackup.java +index 74e7f50..4b0bfbe 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/DisableBackup.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/DisableBackup.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -191,9 +316,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Di import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/DisableBinary.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/DisableBinary.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/DisableBinary.java 2016-10-17 13:11:26.291759791 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/DisableBinary.java 2016-10-17 10:58:46.173126965 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableBinary.java b/src/java/org/apache/cassandra/tools/nodetool/DisableBinary.java +index dee319b..463f2b0 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/DisableBinary.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/DisableBinary.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -203,9 +329,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Di import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/DisableGossip.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/DisableGossip.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/DisableGossip.java 2016-10-17 13:11:26.291759791 +0200 -+++ 
cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/DisableGossip.java 2016-10-17 10:58:46.173126965 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableGossip.java b/src/java/org/apache/cassandra/tools/nodetool/DisableGossip.java +index 32448c9..6f950bb 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/DisableGossip.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/DisableGossip.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -215,9 +342,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Di import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/DisableHandoff.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/DisableHandoff.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/DisableHandoff.java 2016-10-17 13:11:26.291759791 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/DisableHandoff.java 2016-10-17 10:58:46.173126965 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableHandoff.java b/src/java/org/apache/cassandra/tools/nodetool/DisableHandoff.java +index 11cd754..d7ec35f 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/DisableHandoff.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/DisableHandoff.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -227,10 +355,11 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Di import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/DisableHintsForDC.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/DisableHintsForDC.java ---- 
cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/DisableHintsForDC.java 2016-10-17 13:11:26.291759791 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/DisableHintsForDC.java 2016-10-17 10:58:46.173126965 +0200 -@@ -20,8 +20,8 @@ +diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableHintsForDC.java b/src/java/org/apache/cassandra/tools/nodetool/DisableHintsForDC.java +index 7072318..d65c70b 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/DisableHintsForDC.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/DisableHintsForDC.java +@@ -20,8 +20,8 @@ package org.apache.cassandra.tools.nodetool; import java.util.ArrayList; import java.util.List; @@ -241,21 +370,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Di import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/DisableThrift.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/DisableThrift.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/DisableThrift.java 2016-10-17 13:11:26.292759801 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/DisableThrift.java 2016-10-17 10:58:46.173126965 +0200 -@@ -17,7 +17,7 @@ - */ - package org.apache.cassandra.tools.nodetool; - --import io.airlift.command.Command; -+import io.airlift.airline.Command; - - import org.apache.cassandra.tools.NodeProbe; - import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Drain.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Drain.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Drain.java 2016-10-17 13:11:26.292759801 +0200 -+++ 
cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Drain.java 2016-10-17 10:58:46.173126965 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/Drain.java b/src/java/org/apache/cassandra/tools/nodetool/Drain.java +index 5562e6d..eaa537a 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/Drain.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/Drain.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -265,9 +383,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Dr import java.io.IOException; import java.util.concurrent.ExecutionException; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/EnableAutoCompaction.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/EnableAutoCompaction.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/EnableAutoCompaction.java 2016-10-17 13:11:26.292759801 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/EnableAutoCompaction.java 2016-10-17 10:58:46.173126965 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableAutoCompaction.java b/src/java/org/apache/cassandra/tools/nodetool/EnableAutoCompaction.java +index c758df8..795ab13 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/EnableAutoCompaction.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/EnableAutoCompaction.java @@ -17,8 +17,8 @@ */ package org.apache.cassandra.tools.nodetool; @@ -279,9 +398,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/En import java.io.IOException; import java.util.ArrayList; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/EnableBackup.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/EnableBackup.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/EnableBackup.java 2016-10-17 
13:11:26.292759801 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/EnableBackup.java 2016-10-17 10:58:46.174126974 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableBackup.java b/src/java/org/apache/cassandra/tools/nodetool/EnableBackup.java +index 4847fa5..d1773d9 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/EnableBackup.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/EnableBackup.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -291,9 +411,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/En import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/EnableBinary.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/EnableBinary.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/EnableBinary.java 2016-10-17 13:11:26.292759801 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/EnableBinary.java 2016-10-17 10:58:46.174126974 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableBinary.java b/src/java/org/apache/cassandra/tools/nodetool/EnableBinary.java +index f1d5d9c..506945f 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/EnableBinary.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/EnableBinary.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -303,9 +424,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/En import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/EnableGossip.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/EnableGossip.java ---- 
cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/EnableGossip.java 2016-10-17 13:11:26.292759801 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/EnableGossip.java 2016-10-17 10:58:46.174126974 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableGossip.java b/src/java/org/apache/cassandra/tools/nodetool/EnableGossip.java +index 16a9f4b..900c427 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/EnableGossip.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/EnableGossip.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -315,9 +437,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/En import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/EnableHandoff.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/EnableHandoff.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/EnableHandoff.java 2016-10-17 13:11:26.293759811 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/EnableHandoff.java 2016-10-17 10:58:46.174126974 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableHandoff.java b/src/java/org/apache/cassandra/tools/nodetool/EnableHandoff.java +index 149c0fc..bccf7e7 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/EnableHandoff.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/EnableHandoff.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -327,10 +450,11 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/En import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/EnableHintsForDC.java 
cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/EnableHintsForDC.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/EnableHintsForDC.java 2016-10-17 13:11:26.293759811 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/EnableHintsForDC.java 2016-10-17 10:58:46.174126974 +0200 -@@ -20,8 +20,8 @@ +diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableHintsForDC.java b/src/java/org/apache/cassandra/tools/nodetool/EnableHintsForDC.java +index 1979ebd..97e40e0 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/EnableHintsForDC.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/EnableHintsForDC.java +@@ -20,8 +20,8 @@ package org.apache.cassandra.tools.nodetool; import java.util.ArrayList; import java.util.List; @@ -341,21 +465,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/En import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/EnableThrift.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/EnableThrift.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/EnableThrift.java 2016-10-17 13:11:26.293759811 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/EnableThrift.java 2016-10-17 10:58:46.174126974 +0200 -@@ -17,7 +17,7 @@ - */ - package org.apache.cassandra.tools.nodetool; - --import io.airlift.command.Command; -+import io.airlift.airline.Command; - - import org.apache.cassandra.tools.NodeProbe; - import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/FailureDetectorInfo.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/FailureDetectorInfo.java ---- 
cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/FailureDetectorInfo.java 2016-10-17 13:11:26.293759811 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/FailureDetectorInfo.java 2016-10-17 10:58:46.174126974 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/FailureDetectorInfo.java b/src/java/org/apache/cassandra/tools/nodetool/FailureDetectorInfo.java +index 3c0303d..b3ffb6d 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/FailureDetectorInfo.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/FailureDetectorInfo.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -365,9 +478,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Fa import java.util.List; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Flush.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Flush.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Flush.java 2016-10-17 13:11:26.293759811 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Flush.java 2016-10-17 10:58:46.175126982 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/Flush.java b/src/java/org/apache/cassandra/tools/nodetool/Flush.java +index f768615..c83e420 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/Flush.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/Flush.java @@ -17,8 +17,8 @@ */ package org.apache.cassandra.tools.nodetool; @@ -379,10 +493,11 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Fl import java.util.ArrayList; import java.util.List; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/GcStats.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/GcStats.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/GcStats.java 2016-10-17 
13:11:26.293759811 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/GcStats.java 2016-10-17 10:58:46.175126982 +0200 -@@ -20,7 +20,7 @@ +diff --git a/src/java/org/apache/cassandra/tools/nodetool/GcStats.java b/src/java/org/apache/cassandra/tools/nodetool/GcStats.java +index dd38fe7..07ae6d9 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/GcStats.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/GcStats.java +@@ -20,7 +20,7 @@ package org.apache.cassandra.tools.nodetool; import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; @@ -391,9 +506,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Gc @Command(name = "gcstats", description = "Print GC Statistics") public class GcStats extends NodeToolCmd -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/GetCompactionThreshold.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/GetCompactionThreshold.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/GetCompactionThreshold.java 2016-10-17 13:11:26.293759811 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/GetCompactionThreshold.java 2016-10-17 10:58:46.175126982 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/GetCompactionThreshold.java b/src/java/org/apache/cassandra/tools/nodetool/GetCompactionThreshold.java +index 6c629de..589b1b3 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/GetCompactionThreshold.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/GetCompactionThreshold.java @@ -18,8 +18,8 @@ package org.apache.cassandra.tools.nodetool; @@ -405,9 +521,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Ge import java.util.ArrayList; import java.util.List; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/GetCompactionThroughput.java 
cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/GetCompactionThroughput.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/GetCompactionThroughput.java 2016-10-17 13:11:26.294759821 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/GetCompactionThroughput.java 2016-10-17 10:58:46.175126982 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/GetCompactionThroughput.java b/src/java/org/apache/cassandra/tools/nodetool/GetCompactionThroughput.java +index c3af184..a7df4d1 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/GetCompactionThroughput.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/GetCompactionThroughput.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -417,9 +534,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Ge import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/GetEndpoints.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/GetEndpoints.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/GetEndpoints.java 2016-10-17 13:11:26.294759821 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/GetEndpoints.java 2016-10-17 10:58:46.175126982 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/GetEndpoints.java b/src/java/org/apache/cassandra/tools/nodetool/GetEndpoints.java +index 49d2148..922ae26 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/GetEndpoints.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/GetEndpoints.java @@ -18,8 +18,8 @@ package org.apache.cassandra.tools.nodetool; @@ -431,9 +549,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Ge import java.net.InetAddress; import java.util.ArrayList; -diff -ur 
cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/GetInterDCStreamThroughput.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/GetInterDCStreamThroughput.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/GetInterDCStreamThroughput.java 2016-10-17 13:11:26.294759821 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/GetInterDCStreamThroughput.java 2016-10-17 10:58:46.175126982 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/GetInterDCStreamThroughput.java b/src/java/org/apache/cassandra/tools/nodetool/GetInterDCStreamThroughput.java +index 4c354c0..039814e 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/GetInterDCStreamThroughput.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/GetInterDCStreamThroughput.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -443,9 +562,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Ge import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/GetLoggingLevels.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/GetLoggingLevels.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/GetLoggingLevels.java 2016-10-17 13:11:26.294759821 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/GetLoggingLevels.java 2016-10-17 10:58:46.175126982 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/GetLoggingLevels.java b/src/java/org/apache/cassandra/tools/nodetool/GetLoggingLevels.java +index 7ce0017..90d6817 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/GetLoggingLevels.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/GetLoggingLevels.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -455,9 
+575,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Ge import java.util.Map; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/GetSSTables.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/GetSSTables.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/GetSSTables.java 2016-10-17 13:11:26.294759821 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/GetSSTables.java 2016-10-17 13:13:11.617792304 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/GetSSTables.java b/src/java/org/apache/cassandra/tools/nodetool/GetSSTables.java +index 849ad94..48a5723 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/GetSSTables.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/GetSSTables.java @@ -18,13 +18,13 @@ package org.apache.cassandra.tools.nodetool; @@ -475,16 +596,17 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Ge import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -@@ -53,4 +53,4 @@ +@@ -53,4 +53,4 @@ public class GetSSTables extends NodeToolCmd System.out.println(sstable); } } -} \ No newline at end of file +} -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/GetStreamThroughput.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/GetStreamThroughput.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/GetStreamThroughput.java 2016-10-17 13:11:26.294759821 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/GetStreamThroughput.java 2016-10-17 10:58:46.176126991 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/GetStreamThroughput.java b/src/java/org/apache/cassandra/tools/nodetool/GetStreamThroughput.java +index 437eb54..b76d14b 100644 +--- 
a/src/java/org/apache/cassandra/tools/nodetool/GetStreamThroughput.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/GetStreamThroughput.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -494,9 +616,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Ge import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/GetTimeout.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/GetTimeout.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/GetTimeout.java 2016-10-17 13:11:26.294759821 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/GetTimeout.java 2016-10-17 10:58:46.176126991 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/GetTimeout.java b/src/java/org/apache/cassandra/tools/nodetool/GetTimeout.java +index b12c9a7..6c9b541 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/GetTimeout.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/GetTimeout.java @@ -17,8 +17,8 @@ */ package org.apache.cassandra.tools.nodetool; @@ -508,9 +631,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Ge import java.util.ArrayList; import java.util.List; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/GetTraceProbability.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/GetTraceProbability.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/GetTraceProbability.java 2016-10-17 13:11:26.295759830 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/GetTraceProbability.java 2016-10-17 10:58:46.176126991 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/GetTraceProbability.java 
b/src/java/org/apache/cassandra/tools/nodetool/GetTraceProbability.java +index 3940790..374ab2c 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/GetTraceProbability.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/GetTraceProbability.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -520,9 +644,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Ge import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/GossipInfo.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/GossipInfo.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/GossipInfo.java 2016-10-17 13:11:26.295759830 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/GossipInfo.java 2016-10-17 10:58:46.176126991 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/GossipInfo.java b/src/java/org/apache/cassandra/tools/nodetool/GossipInfo.java +index 2acfcf1..1b4b979 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/GossipInfo.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/GossipInfo.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -532,9 +657,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Go import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Info.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Info.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Info.java 2016-10-17 13:11:26.295759830 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Info.java 2016-10-17 10:58:46.176126991 +0200 +diff --git 
a/src/java/org/apache/cassandra/tools/nodetool/Info.java b/src/java/org/apache/cassandra/tools/nodetool/Info.java +index bddd124..d95b58a 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/Info.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/Info.java @@ -17,8 +17,8 @@ */ package org.apache.cassandra.tools.nodetool; @@ -546,9 +672,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/In import java.lang.management.MemoryUsage; import java.util.Iterator; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/InvalidateCounterCache.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/InvalidateCounterCache.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/InvalidateCounterCache.java 2016-10-17 13:11:26.295759830 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/InvalidateCounterCache.java 2016-10-17 10:58:46.176126991 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/InvalidateCounterCache.java b/src/java/org/apache/cassandra/tools/nodetool/InvalidateCounterCache.java +index a5f0ebc..aef77bd 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/InvalidateCounterCache.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/InvalidateCounterCache.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -558,9 +685,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/In import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/InvalidateKeyCache.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/InvalidateKeyCache.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/InvalidateKeyCache.java 2016-10-17 13:11:26.295759830 +0200 -+++ 
cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/InvalidateKeyCache.java 2016-10-17 10:58:46.177126999 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/InvalidateKeyCache.java b/src/java/org/apache/cassandra/tools/nodetool/InvalidateKeyCache.java +index 70abd53..cfe7d2f 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/InvalidateKeyCache.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/InvalidateKeyCache.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -570,9 +698,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/In import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/InvalidateRowCache.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/InvalidateRowCache.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/InvalidateRowCache.java 2016-10-17 13:11:26.295759830 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/InvalidateRowCache.java 2016-10-17 10:58:46.177126999 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/InvalidateRowCache.java b/src/java/org/apache/cassandra/tools/nodetool/InvalidateRowCache.java +index 149f80b..7357e27 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/InvalidateRowCache.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/InvalidateRowCache.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -582,9 +711,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/In import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Join.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Join.java ---- 
cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Join.java 2016-10-17 13:11:26.295759830 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Join.java 2016-10-17 10:58:46.177126999 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/Join.java b/src/java/org/apache/cassandra/tools/nodetool/Join.java +index 5815591..5a11ab8 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/Join.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/Join.java @@ -18,7 +18,7 @@ package org.apache.cassandra.tools.nodetool; @@ -594,10 +724,11 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Jo import java.io.IOException; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/ListSnapshots.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/ListSnapshots.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/ListSnapshots.java 2016-10-17 13:11:26.296759840 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/ListSnapshots.java 2016-10-17 10:58:46.177126999 +0200 -@@ -22,7 +22,7 @@ +diff --git a/src/java/org/apache/cassandra/tools/nodetool/ListSnapshots.java b/src/java/org/apache/cassandra/tools/nodetool/ListSnapshots.java +index 1b3065b..e73fcdc 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/ListSnapshots.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/ListSnapshots.java +@@ -22,7 +22,7 @@ import java.util.Map; import java.util.Set; import javax.management.openmbean.TabularData; @@ -606,9 +737,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Li import org.apache.cassandra.io.util.FileUtils; import org.apache.cassandra.tools.NodeProbe; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Move.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Move.java ---- 
cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Move.java 2016-10-17 13:11:26.296759840 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Move.java 2016-10-17 10:58:46.177126999 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/Move.java b/src/java/org/apache/cassandra/tools/nodetool/Move.java +index fc6b1bf..8654d25 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/Move.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/Move.java @@ -18,8 +18,8 @@ package org.apache.cassandra.tools.nodetool; @@ -620,9 +752,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Mo import java.io.IOException; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/NetStats.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/NetStats.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/NetStats.java 2016-10-17 13:11:26.296759840 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/NetStats.java 2016-10-17 10:58:46.177126999 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/NetStats.java b/src/java/org/apache/cassandra/tools/nodetool/NetStats.java +index c171a3e..306e27b 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/NetStats.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/NetStats.java @@ -17,8 +17,8 @@ */ package org.apache.cassandra.tools.nodetool; @@ -634,9 +767,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Ne import java.util.Set; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/PauseHandoff.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/PauseHandoff.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/PauseHandoff.java 2016-10-17 13:11:26.296759840 +0200 -+++ 
cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/PauseHandoff.java 2016-10-17 10:58:46.177126999 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/PauseHandoff.java b/src/java/org/apache/cassandra/tools/nodetool/PauseHandoff.java +index ed1f655..4ec70d8 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/PauseHandoff.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/PauseHandoff.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -646,9 +780,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Pa import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/ProxyHistograms.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/ProxyHistograms.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/ProxyHistograms.java 2016-10-17 13:11:26.297759850 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/ProxyHistograms.java 2016-10-17 10:58:46.177126999 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/ProxyHistograms.java b/src/java/org/apache/cassandra/tools/nodetool/ProxyHistograms.java +index 656e7ed..620d75a 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/ProxyHistograms.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/ProxyHistograms.java @@ -18,7 +18,7 @@ package org.apache.cassandra.tools.nodetool; @@ -658,9 +793,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Pr import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/RangeKeySample.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/RangeKeySample.java ---- 
cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/RangeKeySample.java 2016-10-17 13:11:26.297759850 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/RangeKeySample.java 2016-10-17 10:58:46.178127007 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/RangeKeySample.java b/src/java/org/apache/cassandra/tools/nodetool/RangeKeySample.java +index e079a4b..1ca2aa9 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/RangeKeySample.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/RangeKeySample.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -670,23 +806,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Ra import java.util.List; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/RebuildIndex.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/RebuildIndex.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/RebuildIndex.java 2016-10-17 13:11:26.298759860 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/RebuildIndex.java 2016-10-17 10:58:46.178127007 +0200 -@@ -19,8 +19,8 @@ - - import static com.google.common.base.Preconditions.checkArgument; - import static com.google.common.collect.Iterables.toArray; --import io.airlift.command.Arguments; --import io.airlift.command.Command; -+import io.airlift.airline.Arguments; -+import io.airlift.airline.Command; - - import java.util.ArrayList; - import java.util.List; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Rebuild.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Rebuild.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Rebuild.java 2016-10-17 13:11:26.298759860 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Rebuild.java 2016-10-17 10:52:30.337742600 
+0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/Rebuild.java b/src/java/org/apache/cassandra/tools/nodetool/Rebuild.java +index 865f9fe..1d4e3f7 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/Rebuild.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/Rebuild.java @@ -17,9 +17,9 @@ */ package org.apache.cassandra.tools.nodetool; @@ -700,16 +823,32 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Re import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -@@ -52,4 +52,4 @@ +@@ -52,4 +52,4 @@ public class Rebuild extends NodeToolCmd probe.rebuild(sourceDataCenterName, keyspace, tokens); } -} \ No newline at end of file +} -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Refresh.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Refresh.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Refresh.java 2016-10-17 13:11:26.298759860 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Refresh.java 2016-10-17 10:58:46.178127007 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/RebuildIndex.java b/src/java/org/apache/cassandra/tools/nodetool/RebuildIndex.java +index 5fd7327..4a6b071 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/RebuildIndex.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/RebuildIndex.java +@@ -19,8 +19,8 @@ package org.apache.cassandra.tools.nodetool; + + import static com.google.common.base.Preconditions.checkArgument; + import static com.google.common.collect.Iterables.toArray; +-import io.airlift.command.Arguments; +-import io.airlift.command.Command; ++import io.airlift.airline.Arguments; ++import io.airlift.airline.Command; + + import java.util.ArrayList; + import java.util.List; +diff --git a/src/java/org/apache/cassandra/tools/nodetool/Refresh.java 
b/src/java/org/apache/cassandra/tools/nodetool/Refresh.java +index 153255c..726f12a 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/Refresh.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/Refresh.java @@ -18,8 +18,8 @@ package org.apache.cassandra.tools.nodetool; @@ -721,9 +860,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Re import java.util.ArrayList; import java.util.List; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/RefreshSizeEstimates.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/RefreshSizeEstimates.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/RefreshSizeEstimates.java 2016-10-17 13:11:26.299759870 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/RefreshSizeEstimates.java 2016-10-17 10:58:46.178127007 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/RefreshSizeEstimates.java b/src/java/org/apache/cassandra/tools/nodetool/RefreshSizeEstimates.java +index 870c7b4..586b451 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/RefreshSizeEstimates.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/RefreshSizeEstimates.java @@ -18,7 +18,7 @@ package org.apache.cassandra.tools.nodetool; @@ -733,9 +873,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Re import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/ReloadTriggers.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/ReloadTriggers.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/ReloadTriggers.java 2016-10-17 13:11:26.299759870 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/ReloadTriggers.java 2016-10-17 10:58:46.178127007 +0200 +diff --git 
a/src/java/org/apache/cassandra/tools/nodetool/ReloadTriggers.java b/src/java/org/apache/cassandra/tools/nodetool/ReloadTriggers.java +index 416aff0..6ca90fb 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/ReloadTriggers.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/ReloadTriggers.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -745,10 +886,11 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Re import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/RelocateSSTables.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/RelocateSSTables.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/RelocateSSTables.java 2016-10-17 13:11:26.299759870 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/RelocateSSTables.java 2016-10-17 10:58:46.178127007 +0200 -@@ -20,9 +20,9 @@ +diff --git a/src/java/org/apache/cassandra/tools/nodetool/RelocateSSTables.java b/src/java/org/apache/cassandra/tools/nodetool/RelocateSSTables.java +index 7c3066c..853b1d3 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/RelocateSSTables.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/RelocateSSTables.java +@@ -20,9 +20,9 @@ package org.apache.cassandra.tools.nodetool; import java.util.ArrayList; import java.util.List; @@ -761,9 +903,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Re import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/RemoveNode.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/RemoveNode.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/RemoveNode.java 2016-10-17 13:11:26.300759879 
+0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/RemoveNode.java 2016-10-17 10:58:46.179127016 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/RemoveNode.java b/src/java/org/apache/cassandra/tools/nodetool/RemoveNode.java +index 848049e..7312597 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/RemoveNode.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/RemoveNode.java @@ -18,8 +18,8 @@ package org.apache.cassandra.tools.nodetool; @@ -775,10 +918,11 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Re import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Repair.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Repair.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Repair.java 2016-10-17 13:11:26.300759879 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Repair.java 2016-10-17 10:58:46.179127016 +0200 -@@ -19,9 +19,9 @@ +diff --git a/src/java/org/apache/cassandra/tools/nodetool/Repair.java b/src/java/org/apache/cassandra/tools/nodetool/Repair.java +index 02bfc5b..085574a 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/Repair.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/Repair.java +@@ -19,9 +19,9 @@ package org.apache.cassandra.tools.nodetool; import static com.google.common.collect.Lists.newArrayList; import static org.apache.commons.lang3.StringUtils.EMPTY; @@ -791,10 +935,11 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Re import java.util.ArrayList; import java.util.HashMap; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/ReplayBatchlog.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/ReplayBatchlog.java ---- 
cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/ReplayBatchlog.java 2016-10-17 13:11:26.300759879 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/ReplayBatchlog.java 2016-10-17 10:58:46.179127016 +0200 -@@ -21,7 +21,7 @@ +diff --git a/src/java/org/apache/cassandra/tools/nodetool/ReplayBatchlog.java b/src/java/org/apache/cassandra/tools/nodetool/ReplayBatchlog.java +index e3dcbd4..23c31f7 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/ReplayBatchlog.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/ReplayBatchlog.java +@@ -21,7 +21,7 @@ package org.apache.cassandra.tools.nodetool; import java.io.IOError; import java.io.IOException; @@ -803,9 +948,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Re import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/ResetLocalSchema.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/ResetLocalSchema.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/ResetLocalSchema.java 2016-10-17 13:11:26.300759879 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/ResetLocalSchema.java 2016-10-17 10:58:46.179127016 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/ResetLocalSchema.java b/src/java/org/apache/cassandra/tools/nodetool/ResetLocalSchema.java +index 43280ab..708636f 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/ResetLocalSchema.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/ResetLocalSchema.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -815,9 +961,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Re import java.io.IOException; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/ResumeHandoff.java 
cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/ResumeHandoff.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/ResumeHandoff.java 2016-10-17 13:11:26.300759879 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/ResumeHandoff.java 2016-10-17 10:58:46.179127016 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/ResumeHandoff.java b/src/java/org/apache/cassandra/tools/nodetool/ResumeHandoff.java +index 584ab64..a3984f8 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/ResumeHandoff.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/ResumeHandoff.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -827,9 +974,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Re import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Ring.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Ring.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Ring.java 2016-10-17 13:11:26.301759889 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Ring.java 2016-10-17 10:58:46.179127016 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/Ring.java b/src/java/org/apache/cassandra/tools/nodetool/Ring.java +index 55220a1..e6f8582 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/Ring.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/Ring.java @@ -18,9 +18,9 @@ package org.apache.cassandra.tools.nodetool; @@ -843,9 +991,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Ri import java.net.InetAddress; import java.net.UnknownHostException; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Scrub.java 
cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Scrub.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Scrub.java 2016-10-17 13:11:26.301759889 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Scrub.java 2016-10-17 10:58:46.179127016 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/Scrub.java b/src/java/org/apache/cassandra/tools/nodetool/Scrub.java +index 2345a85..9cc9fe6 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/Scrub.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/Scrub.java @@ -17,9 +17,9 @@ */ package org.apache.cassandra.tools.nodetool; @@ -859,9 +1008,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Sc import java.util.ArrayList; import java.util.List; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/SetCacheCapacity.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/SetCacheCapacity.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/SetCacheCapacity.java 2016-10-17 13:11:26.301759889 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/SetCacheCapacity.java 2016-10-17 10:58:46.180127024 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetCacheCapacity.java b/src/java/org/apache/cassandra/tools/nodetool/SetCacheCapacity.java +index 6c280d8..461f6ae 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/SetCacheCapacity.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/SetCacheCapacity.java @@ -18,8 +18,8 @@ package org.apache.cassandra.tools.nodetool; @@ -873,9 +1023,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Se import java.util.ArrayList; import java.util.List; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/SetCacheKeysToSave.java 
cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/SetCacheKeysToSave.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/SetCacheKeysToSave.java 2016-10-17 13:11:26.301759889 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/SetCacheKeysToSave.java 2016-10-17 10:58:46.180127024 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetCacheKeysToSave.java b/src/java/org/apache/cassandra/tools/nodetool/SetCacheKeysToSave.java +index 12a4570..18197e6 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/SetCacheKeysToSave.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/SetCacheKeysToSave.java @@ -18,8 +18,8 @@ package org.apache.cassandra.tools.nodetool; @@ -887,10 +1038,11 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Se import java.util.ArrayList; import java.util.List; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThreshold.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThreshold.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThreshold.java 2016-10-17 13:11:26.301759889 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThreshold.java 2016-10-17 10:58:46.180127024 +0200 -@@ -19,8 +19,8 @@ +diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThreshold.java b/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThreshold.java +index 304f2b7..56e558f 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThreshold.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThreshold.java +@@ -19,8 +19,8 @@ package org.apache.cassandra.tools.nodetool; import static com.google.common.base.Preconditions.checkArgument; import static java.lang.Integer.parseInt; @@ -901,9 +1053,10 @@ diff -ur 
cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Se import java.util.ArrayList; import java.util.List; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThroughput.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThroughput.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThroughput.java 2016-10-17 13:11:26.301759889 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThroughput.java 2016-10-17 10:58:46.180127024 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThroughput.java b/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThroughput.java +index 0111a20..80e7222 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThroughput.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThroughput.java @@ -17,8 +17,8 @@ */ package org.apache.cassandra.tools.nodetool; @@ -915,9 +1068,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Se import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/SetHintedHandoffThrottleInKB.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/SetHintedHandoffThrottleInKB.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/SetHintedHandoffThrottleInKB.java 2016-10-17 13:11:26.301759889 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/SetHintedHandoffThrottleInKB.java 2016-10-17 10:58:46.180127024 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetHintedHandoffThrottleInKB.java b/src/java/org/apache/cassandra/tools/nodetool/SetHintedHandoffThrottleInKB.java +index d20ff3f..feb945b 100644 +--- 
a/src/java/org/apache/cassandra/tools/nodetool/SetHintedHandoffThrottleInKB.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/SetHintedHandoffThrottleInKB.java @@ -17,8 +17,8 @@ */ package org.apache.cassandra.tools.nodetool; @@ -929,9 +1083,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Se import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/SetInterDCStreamThroughput.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/SetInterDCStreamThroughput.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/SetInterDCStreamThroughput.java 2016-10-17 13:11:26.302759899 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/SetInterDCStreamThroughput.java 2016-10-17 10:58:46.180127024 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetInterDCStreamThroughput.java b/src/java/org/apache/cassandra/tools/nodetool/SetInterDCStreamThroughput.java +index 41ce43a..e2e606c 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/SetInterDCStreamThroughput.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/SetInterDCStreamThroughput.java @@ -17,8 +17,8 @@ */ package org.apache.cassandra.tools.nodetool; @@ -943,9 +1098,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Se import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/SetLoggingLevel.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/SetLoggingLevel.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/SetLoggingLevel.java 2016-10-17 13:11:26.302759899 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/SetLoggingLevel.java 
2016-10-17 10:58:46.180127024 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetLoggingLevel.java b/src/java/org/apache/cassandra/tools/nodetool/SetLoggingLevel.java +index d11d48f..94e4e3b 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/SetLoggingLevel.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/SetLoggingLevel.java @@ -18,8 +18,8 @@ package org.apache.cassandra.tools.nodetool; @@ -957,9 +1113,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Se import java.util.ArrayList; import java.util.List; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/SetStreamThroughput.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/SetStreamThroughput.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/SetStreamThroughput.java 2016-10-17 13:11:26.302759899 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/SetStreamThroughput.java 2016-10-17 10:58:46.181127033 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetStreamThroughput.java b/src/java/org/apache/cassandra/tools/nodetool/SetStreamThroughput.java +index 8055872..069a6e9 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/SetStreamThroughput.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/SetStreamThroughput.java @@ -17,8 +17,8 @@ */ package org.apache.cassandra.tools.nodetool; @@ -971,9 +1128,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Se import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/SetTimeout.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/SetTimeout.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/SetTimeout.java 2016-10-17 13:11:26.302759899 +0200 -+++ 
cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/SetTimeout.java 2016-10-17 10:58:46.181127033 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetTimeout.java b/src/java/org/apache/cassandra/tools/nodetool/SetTimeout.java +index 0b99efd..06b859b 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/SetTimeout.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/SetTimeout.java @@ -17,8 +17,8 @@ */ package org.apache.cassandra.tools.nodetool; @@ -985,9 +1143,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Se import java.util.ArrayList; import java.util.List; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/SetTraceProbability.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/SetTraceProbability.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/SetTraceProbability.java 2016-10-17 13:11:26.302759899 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/SetTraceProbability.java 2016-10-17 10:58:46.181127033 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetTraceProbability.java b/src/java/org/apache/cassandra/tools/nodetool/SetTraceProbability.java +index ebef1a4..e081980 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/SetTraceProbability.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/SetTraceProbability.java @@ -18,8 +18,8 @@ package org.apache.cassandra.tools.nodetool; @@ -999,10 +1158,11 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Se import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Snapshot.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Snapshot.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Snapshot.java 
2016-10-17 13:11:26.302759899 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Snapshot.java 2016-10-17 10:58:46.181127033 +0200 -@@ -19,9 +19,9 @@ +diff --git a/src/java/org/apache/cassandra/tools/nodetool/Snapshot.java b/src/java/org/apache/cassandra/tools/nodetool/Snapshot.java +index 8941ec1..8d01d3a 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/Snapshot.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/Snapshot.java +@@ -19,9 +19,9 @@ package org.apache.cassandra.tools.nodetool; import static com.google.common.collect.Iterables.toArray; import static org.apache.commons.lang3.StringUtils.join; @@ -1015,9 +1175,27 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Sn import java.io.IOException; import java.util.ArrayList; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/StatusBackup.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/StatusBackup.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/StatusBackup.java 2016-10-17 13:11:26.302759899 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/StatusBackup.java 2016-10-17 10:58:46.181127033 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/Status.java b/src/java/org/apache/cassandra/tools/nodetool/Status.java +index a43b703..028c1aa 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/Status.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/Status.java +@@ -17,9 +17,9 @@ + */ + package org.apache.cassandra.tools.nodetool; + +-import io.airlift.command.Arguments; +-import io.airlift.command.Command; +-import io.airlift.command.Option; ++import io.airlift.airline.Arguments; ++import io.airlift.airline.Command; ++import io.airlift.airline.Option; + + import java.net.InetAddress; + import java.net.UnknownHostException; +diff --git 
a/src/java/org/apache/cassandra/tools/nodetool/StatusBackup.java b/src/java/org/apache/cassandra/tools/nodetool/StatusBackup.java +index 49a6750..84fa8a5 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/StatusBackup.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/StatusBackup.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -1027,9 +1205,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/St import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/StatusBinary.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/StatusBinary.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/StatusBinary.java 2016-10-17 13:11:26.303759909 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/StatusBinary.java 2016-10-17 10:58:46.181127033 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/StatusBinary.java b/src/java/org/apache/cassandra/tools/nodetool/StatusBinary.java +index d4fae14..45fe6c3 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/StatusBinary.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/StatusBinary.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -1039,9 +1218,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/St import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/StatusGossip.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/StatusGossip.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/StatusGossip.java 2016-10-17 13:11:26.303759909 +0200 -+++ 
cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/StatusGossip.java 2016-10-17 10:58:46.182127041 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/StatusGossip.java b/src/java/org/apache/cassandra/tools/nodetool/StatusGossip.java +index e40df8d..b6a1164 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/StatusGossip.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/StatusGossip.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -1051,9 +1231,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/St import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/StatusHandoff.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/StatusHandoff.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/StatusHandoff.java 2016-10-17 13:11:26.303759909 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/StatusHandoff.java 2016-10-17 10:58:46.182127041 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/StatusHandoff.java b/src/java/org/apache/cassandra/tools/nodetool/StatusHandoff.java +index 65f6729..bee161d 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/StatusHandoff.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/StatusHandoff.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -1063,9 +1244,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/St import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Status.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Status.java ---- 
cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Status.java 2016-10-17 13:11:26.303759909 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Status.java 2016-10-17 10:58:46.183127050 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/Stop.java b/src/java/org/apache/cassandra/tools/nodetool/Stop.java +index 6229e65..d529319 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/Stop.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/Stop.java @@ -17,9 +17,9 @@ */ package org.apache.cassandra.tools.nodetool; @@ -1077,23 +1259,12 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/St +import io.airlift.airline.Command; +import io.airlift.airline.Option; - import java.net.InetAddress; - import java.net.UnknownHostException; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/StatusThrift.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/StatusThrift.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/StatusThrift.java 2016-10-17 13:11:26.303759909 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/StatusThrift.java 2016-10-17 10:58:46.183127050 +0200 -@@ -17,7 +17,7 @@ - */ - package org.apache.cassandra.tools.nodetool; - --import io.airlift.command.Command; -+import io.airlift.airline.Command; - + import org.apache.cassandra.db.compaction.OperationType; import org.apache.cassandra.tools.NodeProbe; - import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/StopDaemon.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/StopDaemon.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/StopDaemon.java 2016-10-17 13:11:26.303759909 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/StopDaemon.java 
2016-10-17 10:58:46.183127050 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/StopDaemon.java b/src/java/org/apache/cassandra/tools/nodetool/StopDaemon.java +index a0af89f..fde0245 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/StopDaemon.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/StopDaemon.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -1103,26 +1274,11 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/St import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Stop.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Stop.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Stop.java 2016-10-17 13:11:26.303759909 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Stop.java 2016-10-17 10:58:46.184127058 +0200 -@@ -17,9 +17,9 @@ - */ - package org.apache.cassandra.tools.nodetool; - --import io.airlift.command.Arguments; --import io.airlift.command.Command; --import io.airlift.command.Option; -+import io.airlift.airline.Arguments; -+import io.airlift.airline.Command; -+import io.airlift.airline.Option; - - import org.apache.cassandra.db.compaction.OperationType; - import org.apache.cassandra.tools.NodeProbe; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/TableHistograms.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/TableHistograms.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/TableHistograms.java 2016-10-17 13:11:26.304759919 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/TableHistograms.java 2016-10-17 10:58:46.184127058 +0200 -@@ -19,8 +19,8 @@ +diff --git a/src/java/org/apache/cassandra/tools/nodetool/TableHistograms.java 
b/src/java/org/apache/cassandra/tools/nodetool/TableHistograms.java +index 8f4ffa6..5286e18 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/TableHistograms.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/TableHistograms.java +@@ -19,8 +19,8 @@ package org.apache.cassandra.tools.nodetool; import static com.google.common.base.Preconditions.checkArgument; import static java.lang.String.format; @@ -1133,10 +1289,11 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Ta import java.util.ArrayList; import java.util.List; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/TableStats.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/TableStats.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/TableStats.java 2016-10-17 13:11:26.304759919 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/TableStats.java 2016-10-17 10:55:04.295160059 +0200 -@@ -21,9 +21,9 @@ +diff --git a/src/java/org/apache/cassandra/tools/nodetool/TableStats.java b/src/java/org/apache/cassandra/tools/nodetool/TableStats.java +index ec729a5..6b23fdf 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/TableStats.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/TableStats.java +@@ -21,9 +21,9 @@ import java.util.*; import javax.management.InstanceNotFoundException; import com.google.common.collect.ArrayListMultimap; @@ -1149,10 +1306,11 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Ta import org.apache.cassandra.db.ColumnFamilyStoreMBean; import org.apache.cassandra.io.util.FileUtils; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/TopPartitions.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/TopPartitions.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/TopPartitions.java 2016-10-17 13:11:26.304759919 +0200 
-+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/TopPartitions.java 2016-10-17 10:58:46.185127067 +0200 -@@ -19,9 +19,9 @@ +diff --git a/src/java/org/apache/cassandra/tools/nodetool/TopPartitions.java b/src/java/org/apache/cassandra/tools/nodetool/TopPartitions.java +index b473a8d..27a178f 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/TopPartitions.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/TopPartitions.java +@@ -19,9 +19,9 @@ package org.apache.cassandra.tools.nodetool; import static com.google.common.base.Preconditions.checkArgument; import static org.apache.commons.lang3.StringUtils.join; @@ -1165,9 +1323,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/To import java.util.ArrayList; import java.util.Collections; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/TpStats.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/TpStats.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/TpStats.java 2016-10-17 13:11:26.304759919 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/TpStats.java 2016-10-17 10:58:46.185127067 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/TpStats.java b/src/java/org/apache/cassandra/tools/nodetool/TpStats.java +index 5d3eab7..11f3470 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/TpStats.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/TpStats.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -1177,9 +1336,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Tp import java.util.Map; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/TruncateHints.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/TruncateHints.java ---- 
cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/TruncateHints.java 2016-10-17 13:11:26.304759919 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/TruncateHints.java 2016-10-17 10:58:46.186127075 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/TruncateHints.java b/src/java/org/apache/cassandra/tools/nodetool/TruncateHints.java +index bcd554f..a3a0049 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/TruncateHints.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/TruncateHints.java @@ -18,8 +18,8 @@ package org.apache.cassandra.tools.nodetool; @@ -1191,9 +1351,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Tr import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/UpgradeSSTable.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/UpgradeSSTable.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/UpgradeSSTable.java 2016-10-17 13:11:26.304759919 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/UpgradeSSTable.java 2016-10-17 10:58:46.186127075 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/UpgradeSSTable.java b/src/java/org/apache/cassandra/tools/nodetool/UpgradeSSTable.java +index 82866e0..c957c8c 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/UpgradeSSTable.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/UpgradeSSTable.java @@ -17,9 +17,9 @@ */ package org.apache.cassandra.tools.nodetool; @@ -1207,9 +1368,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Up import java.util.ArrayList; import java.util.List; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Verify.java 
cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Verify.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Verify.java 2016-10-17 13:11:26.304759919 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Verify.java 2016-10-17 10:58:46.186127075 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/Verify.java b/src/java/org/apache/cassandra/tools/nodetool/Verify.java +index c449366..75cb109 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/Verify.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/Verify.java @@ -17,9 +17,9 @@ */ package org.apache.cassandra.tools.nodetool; @@ -1223,9 +1385,10 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Ve import java.util.ArrayList; import java.util.List; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Version.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Version.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Version.java 2016-10-17 13:11:26.305759928 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/Version.java 2016-10-17 10:58:46.186127075 +0200 +diff --git a/src/java/org/apache/cassandra/tools/nodetool/Version.java b/src/java/org/apache/cassandra/tools/nodetool/Version.java +index 2495508..395a247 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/Version.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/Version.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.tools.nodetool; @@ -1235,10 +1398,11 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Ve import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool.NodeToolCmd; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/ViewBuildStatus.java 
cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/ViewBuildStatus.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/ViewBuildStatus.java 2016-09-26 16:02:27.000000000 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/nodetool/ViewBuildStatus.java 2016-10-17 13:13:41.405084309 +0200 -@@ -22,8 +22,8 @@ +diff --git a/src/java/org/apache/cassandra/tools/nodetool/ViewBuildStatus.java b/src/java/org/apache/cassandra/tools/nodetool/ViewBuildStatus.java +index 0696396..b432b68 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/ViewBuildStatus.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/ViewBuildStatus.java +@@ -22,8 +22,8 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -1249,15 +1413,6 @@ diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/nodetool/Vi import org.apache.cassandra.tools.NodeProbe; import org.apache.cassandra.tools.NodeTool; import org.apache.cassandra.tools.nodetool.formatter.TableBuilder; -diff -ur cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/NodeTool.java cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/NodeTool.java ---- cassandra-cassandra-3.9/src/java/org/apache/cassandra/tools/NodeTool.java 2016-10-17 13:11:26.305759928 +0200 -+++ cassandra-cassandra-3.9new/src/java/org/apache/cassandra/tools/NodeTool.java 2016-10-17 10:58:46.186127075 +0200 -@@ -28,7 +28,7 @@ - import com.google.common.base.Throwables; - import com.google.common.collect.*; - --import io.airlift.command.*; -+import io.airlift.airline.*; - - import org.apache.cassandra.locator.EndpointSnitchInfoMBean; - import org.apache.cassandra.tools.nodetool.*; +-- +2.9.3 + diff --git a/cassandra-3.9-build.patch b/cassandra-3.9-build.patch index 9591f73..385125e 100644 --- a/cassandra-3.9-build.patch +++ b/cassandra-3.9-build.patch @@ -1,6 +1,16 @@ -diff -urN cassandra-cassandra-3.9/build.xml cassandra-cassandra-3.9new/build.xml 
---- cassandra-cassandra-3.9/build.xml 2016-11-30 11:59:19.400942783 +0100 -+++ cassandra-cassandra-3.9new/build.xml 2016-12-01 10:49:05.383549756 +0100 +From de036200face11b9971f55fc1c2ff53dc63383a1 Mon Sep 17 00:00:00 2001 +From: Tomas Repik +Date: Mon, 27 Mar 2017 08:23:29 +0200 +Subject: [PATCH] refine build patch + +--- + build.xml | 42 ++++++++++++++++++++---------------------- + 1 file changed, 20 insertions(+), 22 deletions(-) + +diff --git a/build.xml b/build.xml +index 1393a8a..65fbe8b 100644 +--- a/build.xml ++++ b/build.xml @@ -17,7 +17,7 @@ ~ specific language governing permissions and limitations ~ under the License. @@ -10,7 +20,7 @@ diff -urN cassandra-cassandra-3.9/build.xml cassandra-cassandra-3.9new/build.xml xmlns:artifact="antlib:org.apache.maven.artifact.ant"> -@@ -76,7 +76,7 @@ +@@ -73,7 +73,7 @@ @@ -19,17 +29,17 @@ diff -urN cassandra-cassandra-3.9/build.xml cassandra-cassandra-3.9new/build.xml -@@ -173,7 +173,8 @@ +@@ -169,7 +169,8 @@ windowtitle="${ant.project.name} API" classpathref="cassandra.classpath" bottom="Copyright &copy; ${YEAR} The Apache Software Foundation" useexternalfile="yes" - maxmemory="256m"> + maxmemory="256m" -+ encoding="utf-8"> ++ encoding="utf-8"> -@@ -224,7 +225,7 @@ +@@ -219,7 +220,7 @@ Building Grammar ${build.src.antlr}/Cql.g ... 
-@@ -267,7 +268,7 @@ +@@ -262,7 +263,7 @@ grammar files --> @@ -47,19 +57,16 @@ diff -urN cassandra-cassandra-3.9/build.xml cassandra-cassandra-3.9new/build.xml -@@ -383,9 +384,8 @@ +@@ -378,7 +379,7 @@ - -- -- -+ -+ - - - -@@ -398,7 +398,7 @@ ++ + + + +@@ -386,7 +387,7 @@ @@ -68,16 +75,16 @@ diff -urN cassandra-cassandra-3.9/build.xml cassandra-cassandra-3.9new/build.xml -@@ -430,7 +430,7 @@ +@@ -417,7 +418,7 @@ - -+ ++ -@@ -446,8 +446,7 @@ +@@ -433,8 +434,7 @@ @@ -87,7 +94,7 @@ diff -urN cassandra-cassandra-3.9/build.xml cassandra-cassandra-3.9new/build.xml -@@ -489,11 +488,11 @@ +@@ -476,11 +476,11 @@ version="${version}"/> @@ -101,7 +108,7 @@ diff -urN cassandra-cassandra-3.9/build.xml cassandra-cassandra-3.9new/build.xml -@@ -515,7 +514,7 @@ +@@ -502,7 +502,7 @@ artifactId="cassandra-parent" version="${version}"/> @@ -110,17 +117,17 @@ diff -urN cassandra-cassandra-3.9/build.xml cassandra-cassandra-3.9new/build.xml -@@ -577,7 +576,7 @@ +@@ -564,7 +564,7 @@ - -+ ++ -@@ -593,11 +592,11 @@ - +@@ -576,11 +576,11 @@ + - @@ -133,17 +140,7 @@ diff -urN cassandra-cassandra-3.9/build.xml cassandra-cassandra-3.9new/build.xml -@@ -632,8 +631,7 @@ - - - -- -- -+ - - - -@@ -799,10 +797,10 @@ +@@ -714,10 +714,10 @@ The build target builds all the .class files --> -@@ -873,7 +871,7 @@ +@@ -778,7 +778,7 @@ The jar target makes cassandra.jar output. 
--> - -@@ -913,8 +911,6 @@ + - +- value="${ant.project.name}-clientutil-${version}.jar" /> -@@ -947,7 +943,7 @@ +@@ -831,7 +829,7 @@ @@ -183,3 +180,6 @@ diff -urN cassandra-cassandra-3.9/build.xml cassandra-cassandra-3.9new/build.xml +-- +2.9.3 + diff --git a/cassandra-3.9-remove-thrift.patch b/cassandra-3.9-remove-thrift.patch new file mode 100644 index 0000000..356db63 --- /dev/null +++ b/cassandra-3.9-remove-thrift.patch @@ -0,0 +1,113330 @@ +From f5ed326fa38c533868495021ed59a232159988ab Mon Sep 17 00:00:00 2001 +From: Tomas Repik +Date: Thu, 23 Mar 2017 19:24:11 +0100 +Subject: [PATCH] thrift removal from 3.9 + +--- + NOTICE.txt | 7 - + bin/cassandra.ps1 | 3 +- + bin/cqlsh.py | 6 +- + build.xml | 166 +- + conf/cassandra-env.ps1 | 2 +- + conf/cassandra.yaml | 121 +- + conf/jvm.options | 6 - + conf/logback.xml | 1 - + doc/SASI.md | 3 +- + doc/convert_yaml_to_rst.py | 1 - + doc/source/cql/ddl.rst | 10 +- + doc/source/cql/index.rst | 3 +- + doc/source/operating/metrics.rst | 1 - + examples/hadoop_cql3_word_count/README.txt | 51 - + examples/hadoop_cql3_word_count/bin/word_count | 62 - + .../hadoop_cql3_word_count/bin/word_count_counters | 61 - + .../hadoop_cql3_word_count/bin/word_count_setup | 61 - + examples/hadoop_cql3_word_count/build.xml | 113 - + examples/hadoop_cql3_word_count/conf/logback.xml | 42 - + examples/hadoop_cql3_word_count/ivy.xml | 24 - + examples/hadoop_cql3_word_count/src/WordCount.java | 259 - + .../src/WordCountCounters.java | 168 - + .../hadoop_cql3_word_count/src/WordCountSetup.java | 181 - + ide/idea-iml-file.xml | 1 - + ide/idea/workspace.xml | 7 +- + interface/cassandra.thrift | 945 - + .../cassandra/thrift/AuthenticationException.java | 413 - + .../cassandra/thrift/AuthenticationRequest.java | 465 - + .../cassandra/thrift/AuthorizationException.java | 413 - + .../org/apache/cassandra/thrift/CASResult.java | 574 - + .../org/apache/cassandra/thrift/Cassandra.java | 55794 ------------------- + .../org/apache/cassandra/thrift/CfDef.java | 
4927 -- + .../org/apache/cassandra/thrift/CfSplit.java | 614 - + .../org/apache/cassandra/thrift/Column.java | 754 - + .../org/apache/cassandra/thrift/ColumnDef.java | 915 - + .../cassandra/thrift/ColumnOrSuperColumn.java | 771 - + .../org/apache/cassandra/thrift/ColumnParent.java | 538 - + .../org/apache/cassandra/thrift/ColumnPath.java | 660 - + .../org/apache/cassandra/thrift/ColumnSlice.java | 551 - + .../org/apache/cassandra/thrift/Compression.java | 69 - + .../apache/cassandra/thrift/ConsistencyLevel.java | 137 - + .../org/apache/cassandra/thrift/CounterColumn.java | 521 - + .../cassandra/thrift/CounterSuperColumn.java | 576 - + .../org/apache/cassandra/thrift/CqlMetadata.java | 817 - + .../apache/cassandra/thrift/CqlPreparedResult.java | 821 - + .../org/apache/cassandra/thrift/CqlResult.java | 807 - + .../org/apache/cassandra/thrift/CqlResultType.java | 69 - + .../org/apache/cassandra/thrift/CqlRow.java | 584 - + .../org/apache/cassandra/thrift/Deletion.java | 645 - + .../apache/cassandra/thrift/EndpointDetails.java | 630 - + .../org/apache/cassandra/thrift/IndexClause.java | 681 - + .../apache/cassandra/thrift/IndexExpression.java | 650 - + .../org/apache/cassandra/thrift/IndexOperator.java | 75 - + .../org/apache/cassandra/thrift/IndexType.java | 69 - + .../cassandra/thrift/InvalidRequestException.java | 414 - + .../org/apache/cassandra/thrift/KeyCount.java | 521 - + .../org/apache/cassandra/thrift/KeyRange.java | 1034 - + .../org/apache/cassandra/thrift/KeySlice.java | 583 - + .../org/apache/cassandra/thrift/KsDef.java | 1047 - + .../apache/cassandra/thrift/MultiSliceRequest.java | 1042 - + .../org/apache/cassandra/thrift/Mutation.java | 537 - + .../apache/cassandra/thrift/NotFoundException.java | 307 - + .../thrift/SchemaDisagreementException.java | 310 - + .../apache/cassandra/thrift/SlicePredicate.java | 588 - + .../org/apache/cassandra/thrift/SliceRange.java | 749 - + .../org/apache/cassandra/thrift/SuperColumn.java | 582 - + 
.../apache/cassandra/thrift/TimedOutException.java | 671 - + .../org/apache/cassandra/thrift/TokenRange.java | 990 - + .../org/apache/cassandra/thrift/TriggerDef.java | 568 - + .../cassandra/thrift/UnavailableException.java | 307 - + .../cassandra/thrift/cassandraConstants.java | 61 - + lib/licenses/disruptor-3.0.1.txt | 201 - + lib/licenses/libthrift-0.9.2.txt | 202 - + lib/licenses/thrift-server-0.3.7.txt | 202 - + pylib/cqlshlib/test/test_cql_parsing.py | 2 +- + pylib/cqlshlib/test/test_cqlsh_commands.py | 2 +- + pylib/cqlshlib/test/test_cqlsh_completion.py | 2 +- + pylib/cqlshlib/test/test_cqlsh_invocation.py | 2 +- + pylib/cqlshlib/test/test_cqlsh_output.py | 2 +- + pylib/cqlshlib/test/test_cqlsh_parsing.py | 2 +- + .../org/apache/cassandra/auth/IAuthenticator.java | 10 +- + .../apache/cassandra/cache/CounterCacheKey.java | 94 +- + .../org/apache/cassandra/client/RingCache.java | 121 - + .../org/apache/cassandra/config/CFMetaData.java | 96 +- + .../apache/cassandra/config/ColumnDefinition.java | 41 +- + src/java/org/apache/cassandra/config/Config.java | 27 - + .../cassandra/config/DatabaseDescriptor.java | 139 - + .../cassandra/config/RequestSchedulerOptions.java | 33 - + src/java/org/apache/cassandra/cql3/CQL3Type.java | 2 +- + .../cql3/CustomPayloadMirroringQueryHandler.java | 5 - + .../org/apache/cassandra/cql3/QueryHandler.java | 2 - + .../org/apache/cassandra/cql3/QueryOptions.java | 8 +- + .../org/apache/cassandra/cql3/QueryProcessor.java | 100 +- + src/java/org/apache/cassandra/cql3/ResultSet.java | 43 - + .../apache/cassandra/cql3/UpdateParameters.java | 8 +- + src/java/org/apache/cassandra/cql3/Validation.java | 117 + + .../cql3/statements/AlterTableStatement.java | 17 +- + .../cql3/statements/AlterViewStatement.java | 5 +- + .../cassandra/cql3/statements/BatchStatement.java | 5 +- + .../cql3/statements/CreateAggregateStatement.java | 3 +- + .../cql3/statements/CreateFunctionStatement.java | 4 +- + .../cql3/statements/CreateIndexStatement.java | 4 +- 
+ .../cql3/statements/CreateKeyspaceStatement.java | 4 +- + .../cql3/statements/CreateTableStatement.java | 8 +- + .../cql3/statements/CreateTriggerStatement.java | 4 +- + .../cql3/statements/CreateViewStatement.java | 3 +- + .../cql3/statements/DropAggregateStatement.java | 4 +- + .../cql3/statements/DropFunctionStatement.java | 4 +- + .../cql3/statements/DropKeyspaceStatement.java | 4 +- + .../cql3/statements/DropTriggerStatement.java | 4 +- + .../cql3/statements/ModificationStatement.java | 9 +- + .../cassandra/cql3/statements/SelectStatement.java | 3 +- + .../cql3/statements/TruncateStatement.java | 3 +- + .../cassandra/cql3/statements/UpdateStatement.java | 9 +- + .../org/apache/cassandra/db/BufferClustering.java | 5 +- + .../org/apache/cassandra/db/ColumnFamilyStore.java | 4 +- + .../org/apache/cassandra/db/CompactTables.java | 36 +- + .../org/apache/cassandra/db/EmptyIterators.java | 13 +- + src/java/org/apache/cassandra/db/LegacyLayout.java | 2380 - + src/java/org/apache/cassandra/db/Memtable.java | 13 +- + .../cassandra/db/PartitionRangeReadCommand.java | 22 +- + src/java/org/apache/cassandra/db/ReadCommand.java | 1071 +- + src/java/org/apache/cassandra/db/ReadResponse.java | 277 +- + .../org/apache/cassandra/db/RowIndexEntry.java | 257 +- + src/java/org/apache/cassandra/db/Serializers.java | 183 - + .../cassandra/db/SinglePartitionReadCommand.java | 64 +- + .../apache/cassandra/db/SizeEstimatesRecorder.java | 3 +- + src/java/org/apache/cassandra/db/StorageHook.java | 19 +- + .../org/apache/cassandra/db/SystemKeyspace.java | 10 +- + .../cassandra/db/UnfilteredDeserializer.java | 437 +- + .../db/columniterator/AbstractSSTableIterator.java | 38 +- + .../db/columniterator/SSTableIterator.java | 3 +- + .../db/columniterator/SSTableReversedIterator.java | 3 +- + .../db/compaction/CompactionIterator.java | 14 +- + .../db/compaction/LeveledCompactionStrategy.java | 5 - + .../org/apache/cassandra/db/filter/DataLimits.java | 274 +- + 
.../org/apache/cassandra/db/filter/RowFilter.java | 147 +- + .../cassandra/db/marshal/CollectionType.java | 3 +- + .../db/marshal/ColumnToCollectionType.java | 153 - + .../apache/cassandra/db/marshal/CompositeType.java | 163 +- + .../db/partitions/CachedBTreePartition.java | 49 +- + .../cassandra/db/partitions/CachedPartition.java | 20 - + .../cassandra/db/partitions/PartitionUpdate.java | 43 +- + .../cassandra/db/partitions/PurgeFunction.java | 6 +- + .../SingletonUnfilteredPartitionIterator.java | 9 +- + .../db/partitions/UnfilteredPartitionIterator.java | 11 - + .../partitions/UnfilteredPartitionIterators.java | 24 +- + .../rows/UnfilteredRowIteratorWithLowerBound.java | 18 +- + .../cassandra/db/rows/UnfilteredRowIterators.java | 6 - + .../org/apache/cassandra/db/transform/Filter.java | 2 +- + .../cassandra/db/transform/FilteredPartitions.java | 2 +- + .../db/transform/UnfilteredPartitions.java | 8 - + .../org/apache/cassandra/hadoop/ConfigHelper.java | 225 +- + .../cassandra/hadoop/cql3/CqlInputFormat.java | 26 +- + .../index/internal/CassandraIndexFunctions.java | 2 +- + .../cassandra/index/internal/IndexEntry.java | 2 +- + .../internal/composites/CompositesSearcher.java | 5 - + .../index/internal/keys/KeysSearcher.java | 74 +- + .../cassandra/index/sasi/plan/QueryController.java | 8 +- + .../cassandra/index/sasi/plan/QueryPlan.java | 5 - + .../org/apache/cassandra/io/sstable/IndexInfo.java | 78 +- + .../io/sstable/SSTableSimpleIterator.java | 76 +- + .../cassandra/io/sstable/format/SSTableReader.java | 10 +- + .../cassandra/io/sstable/format/big/BigFormat.java | 2 +- + .../io/sstable/format/big/BigTableReader.java | 14 +- + .../io/sstable/format/big/BigTableScanner.java | 28 +- + .../cassandra/scheduler/IRequestScheduler.java | 41 - + .../apache/cassandra/scheduler/NoScheduler.java | 37 - + .../cassandra/scheduler/RoundRobinScheduler.java | 161 - + .../apache/cassandra/scheduler/WeightedQueue.java | 74 - + .../cassandra/schema/LegacySchemaMigrator.java | 1101 
- + .../org/apache/cassandra/service/CacheService.java | 51 +- + .../apache/cassandra/service/CassandraDaemon.java | 24 - + .../org/apache/cassandra/service/ClientState.java | 6 +- + .../service/EmbeddedCassandraService.java | 3 +- + .../org/apache/cassandra/service/StorageProxy.java | 7 +- + .../apache/cassandra/service/StorageService.java | 66 +- + .../cassandra/service/StorageServiceMBean.java | 11 +- + .../org/apache/cassandra/service/TokenRange.java | 119 + + .../cassandra/service/pager/PagingState.java | 86 +- + .../service/pager/PartitionRangeQueryPager.java | 3 - + .../cassandra/service/pager/QueryPagers.java | 65 - + .../service/pager/SinglePartitionPager.java | 5 +- + .../apache/cassandra/thrift/CassandraServer.java | 2621 - + .../cassandra/thrift/CustomTNonBlockingServer.java | 91 - + .../cassandra/thrift/CustomTThreadPoolServer.java | 288 - + .../apache/cassandra/thrift/ITransportFactory.java | 64 - + .../cassandra/thrift/SSLTransportFactory.java | 88 - + .../thrift/TCustomNonblockingServerSocket.java | 87 - + .../cassandra/thrift/TCustomServerSocket.java | 189 - + .../org/apache/cassandra/thrift/TCustomSocket.java | 210 - + .../cassandra/thrift/TFramedTransportFactory.java | 56 - + .../cassandra/thrift/THsHaDisruptorServer.java | 109 - + .../cassandra/thrift/TServerCustomFactory.java | 74 - + .../apache/cassandra/thrift/TServerFactory.java | 44 - + .../apache/cassandra/thrift/ThriftClientState.java | 56 - + .../apache/cassandra/thrift/ThriftConversion.java | 726 - + .../cassandra/thrift/ThriftResultsMerger.java | 294 - + .../org/apache/cassandra/thrift/ThriftServer.java | 146 - + .../cassandra/thrift/ThriftSessionManager.java | 85 - + .../apache/cassandra/thrift/ThriftValidation.java | 671 - + src/java/org/apache/cassandra/tools/NodeProbe.java | 15 - + src/java/org/apache/cassandra/tools/NodeTool.java | 3 - + .../cassandra/tools/nodetool/DisableThrift.java | 33 - + .../cassandra/tools/nodetool/EnableThrift.java | 33 - + 
.../org/apache/cassandra/tools/nodetool/Info.java | 1 - + .../cassandra/tools/nodetool/StatusThrift.java | 36 - + .../transport/messages/ResultMessage.java | 57 +- + .../cassandra/utils/BatchRemoveIterator.java | 32 - + .../org/apache/cassandra/utils/ByteBufferUtil.java | 9 + + test/conf/cassandra-murmur.yaml | 3 - + test/conf/cassandra.yaml | 3 - + test/resources/functions/configure_cassandra.sh | 43 +- + .../cassandra/OffsetAwareConfigurationLoader.java | 1 - + test/unit/org/apache/cassandra/Util.java | 1 - + .../org/apache/cassandra/client/TestRingCache.java | 121 - + .../apache/cassandra/config/CFMetaDataTest.java | 72 - + .../cassandra/config/ColumnDefinitionTest.java | 55 - + .../cassandra/config/DatabaseDescriptorTest.java | 29 - + test/unit/org/apache/cassandra/cql3/CQLTester.java | 2 +- + .../cassandra/cql3/ThriftCompatibilityTest.java | 112 - + .../validation/entities/SecondaryIndexTest.java | 10 +- + .../cassandra/cql3/validation/entities/UFTest.java | 20 +- + .../validation/operations/AggregationTest.java | 4 +- + .../cql3/validation/operations/CreateTest.java | 7 - + .../apache/cassandra/db/ColumnFamilyStoreTest.java | 87 - + .../apache/cassandra/db/LegacyCellNameTest.java | 81 - + .../cassandra/db/PartitionRangeReadTest.java | 206 - + .../org/apache/cassandra/db/ReadResponseTest.java | 99 - + .../org/apache/cassandra/db/RowIndexEntryTest.java | 45 +- + .../db/SinglePartitionSliceCommandTest.java | 57 +- + .../cassandra/db/compaction/TTLExpiryTest.java | 2 +- + .../apache/cassandra/db/composites/CTypeTest.java | 80 +- + .../cassandra/db/marshal/CompositeTypeTest.java | 9 +- + .../hadoop/ColumnFamilyInputFormatTest.java | 52 - + .../apache/cassandra/index/sasi/SASIIndexTest.java | 15 +- + .../io/sstable/SSTableCorruptionDetectionTest.java | 2 +- + .../cassandra/io/sstable/SSTableScannerTest.java | 2 +- + .../cassandra/io/sstable/SSTableWriterTest.java | 2 +- + .../io/sstable/format/ClientModeSSTableTest.java | 133 - + 
.../cassandra/schema/LegacySchemaMigratorTest.java | 843 - + .../cassandra/schema/SchemaKeyspaceTest.java | 62 - + .../apache/cassandra/service/DataResolverTest.java | 10 +- + .../service/EmbeddedCassandraServiceTest.java | 129 - + .../cassandra/transport/MessagePayloadTest.java | 5 - + .../apache/cassandra/triggers/TriggersTest.java | 131 - + tools/bin/cassandra.in.bat | 2 +- + tools/bin/cassandra.in.sh | 1 - + tools/stress/README.txt | 2 +- + .../src/org/apache/cassandra/stress/Operation.java | 11 +- + .../org/apache/cassandra/stress/StressAction.java | 10 +- + .../org/apache/cassandra/stress/StressProfile.java | 63 +- + .../stress/operations/predefined/CqlOperation.java | 148 +- + .../operations/predefined/PredefinedOperation.java | 71 +- + .../operations/predefined/ThriftCounterAdder.java | 94 - + .../operations/predefined/ThriftCounterGetter.java | 67 - + .../operations/predefined/ThriftInserter.java | 104 - + .../stress/operations/predefined/ThriftReader.java | 78 - + .../operations/userdefined/SchemaInsert.java | 36 +- + .../stress/operations/userdefined/SchemaQuery.java | 54 +- + .../operations/userdefined/SchemaStatement.java | 9 +- + .../operations/userdefined/TokenRangeQuery.java | 23 - + .../userdefined/ValidatingSchemaQuery.java | 85 +- + .../cassandra/stress/settings/CliOption.java | 2 +- + .../cassandra/stress/settings/ConnectionAPI.java | 2 +- + .../cassandra/stress/settings/ConnectionStyle.java | 12 +- + .../apache/cassandra/stress/settings/Legacy.java | 12 +- + .../cassandra/stress/settings/SettingsColumn.java | 1 - + .../cassandra/stress/settings/SettingsCommand.java | 2 +- + .../cassandra/stress/settings/SettingsMode.java | 46 +- + .../cassandra/stress/settings/SettingsNode.java | 2 - + .../cassandra/stress/settings/SettingsPort.java | 5 +- + .../cassandra/stress/settings/SettingsSchema.java | 85 +- + .../stress/settings/SettingsTransport.java | 64 +- + .../cassandra/stress/settings/StressSettings.java | 83 - + 
.../cassandra/stress/util/SimpleThriftClient.java | 111 - + .../cassandra/stress/util/SmartThriftClient.java | 282 - + .../apache/cassandra/stress/util/ThriftClient.java | 57 - + 278 files changed, 978 insertions(+), 106225 deletions(-) + delete mode 100644 examples/hadoop_cql3_word_count/README.txt + delete mode 100755 examples/hadoop_cql3_word_count/bin/word_count + delete mode 100755 examples/hadoop_cql3_word_count/bin/word_count_counters + delete mode 100755 examples/hadoop_cql3_word_count/bin/word_count_setup + delete mode 100644 examples/hadoop_cql3_word_count/build.xml + delete mode 100644 examples/hadoop_cql3_word_count/conf/logback.xml + delete mode 100644 examples/hadoop_cql3_word_count/ivy.xml + delete mode 100644 examples/hadoop_cql3_word_count/src/WordCount.java + delete mode 100644 examples/hadoop_cql3_word_count/src/WordCountCounters.java + delete mode 100644 examples/hadoop_cql3_word_count/src/WordCountSetup.java + delete mode 100644 interface/cassandra.thrift + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/AuthenticationException.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/AuthenticationRequest.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/AuthorizationException.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/CASResult.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/Cassandra.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/CfDef.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/CfSplit.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/Column.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/ColumnDef.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/ColumnOrSuperColumn.java + delete mode 100644 
interface/thrift/gen-java/org/apache/cassandra/thrift/ColumnParent.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/ColumnPath.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/ColumnSlice.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/Compression.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/ConsistencyLevel.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/CounterColumn.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/CounterSuperColumn.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/CqlMetadata.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/CqlPreparedResult.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/CqlResult.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/CqlResultType.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/CqlRow.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/Deletion.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/EndpointDetails.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/IndexClause.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/IndexExpression.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/IndexOperator.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/IndexType.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/InvalidRequestException.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/KeyCount.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/KeyRange.java + delete mode 100644 
interface/thrift/gen-java/org/apache/cassandra/thrift/KeySlice.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/KsDef.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/MultiSliceRequest.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/Mutation.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/NotFoundException.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/SchemaDisagreementException.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/SlicePredicate.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/SliceRange.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/SuperColumn.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/TimedOutException.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/TokenRange.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/TriggerDef.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/UnavailableException.java + delete mode 100644 interface/thrift/gen-java/org/apache/cassandra/thrift/cassandraConstants.java + delete mode 100644 lib/licenses/disruptor-3.0.1.txt + delete mode 100644 lib/licenses/libthrift-0.9.2.txt + delete mode 100644 lib/licenses/thrift-server-0.3.7.txt + delete mode 100644 src/java/org/apache/cassandra/client/RingCache.java + delete mode 100644 src/java/org/apache/cassandra/config/RequestSchedulerOptions.java + create mode 100644 src/java/org/apache/cassandra/cql3/Validation.java + delete mode 100644 src/java/org/apache/cassandra/db/LegacyLayout.java + delete mode 100644 src/java/org/apache/cassandra/db/Serializers.java + delete mode 100644 src/java/org/apache/cassandra/db/marshal/ColumnToCollectionType.java + delete mode 100644 
src/java/org/apache/cassandra/scheduler/IRequestScheduler.java + delete mode 100644 src/java/org/apache/cassandra/scheduler/NoScheduler.java + delete mode 100644 src/java/org/apache/cassandra/scheduler/RoundRobinScheduler.java + delete mode 100644 src/java/org/apache/cassandra/scheduler/WeightedQueue.java + delete mode 100644 src/java/org/apache/cassandra/schema/LegacySchemaMigrator.java + create mode 100644 src/java/org/apache/cassandra/service/TokenRange.java + delete mode 100644 src/java/org/apache/cassandra/service/pager/QueryPagers.java + delete mode 100644 src/java/org/apache/cassandra/thrift/CassandraServer.java + delete mode 100644 src/java/org/apache/cassandra/thrift/CustomTNonBlockingServer.java + delete mode 100644 src/java/org/apache/cassandra/thrift/CustomTThreadPoolServer.java + delete mode 100644 src/java/org/apache/cassandra/thrift/ITransportFactory.java + delete mode 100644 src/java/org/apache/cassandra/thrift/SSLTransportFactory.java + delete mode 100644 src/java/org/apache/cassandra/thrift/TCustomNonblockingServerSocket.java + delete mode 100644 src/java/org/apache/cassandra/thrift/TCustomServerSocket.java + delete mode 100644 src/java/org/apache/cassandra/thrift/TCustomSocket.java + delete mode 100644 src/java/org/apache/cassandra/thrift/TFramedTransportFactory.java + delete mode 100644 src/java/org/apache/cassandra/thrift/THsHaDisruptorServer.java + delete mode 100644 src/java/org/apache/cassandra/thrift/TServerCustomFactory.java + delete mode 100644 src/java/org/apache/cassandra/thrift/TServerFactory.java + delete mode 100644 src/java/org/apache/cassandra/thrift/ThriftClientState.java + delete mode 100644 src/java/org/apache/cassandra/thrift/ThriftConversion.java + delete mode 100644 src/java/org/apache/cassandra/thrift/ThriftResultsMerger.java + delete mode 100644 src/java/org/apache/cassandra/thrift/ThriftServer.java + delete mode 100644 src/java/org/apache/cassandra/thrift/ThriftSessionManager.java + delete mode 100644 
src/java/org/apache/cassandra/thrift/ThriftValidation.java + delete mode 100644 src/java/org/apache/cassandra/tools/nodetool/DisableThrift.java + delete mode 100644 src/java/org/apache/cassandra/tools/nodetool/EnableThrift.java + delete mode 100644 src/java/org/apache/cassandra/tools/nodetool/StatusThrift.java + delete mode 100644 src/java/org/apache/cassandra/utils/BatchRemoveIterator.java + delete mode 100644 test/unit/org/apache/cassandra/client/TestRingCache.java + delete mode 100644 test/unit/org/apache/cassandra/config/ColumnDefinitionTest.java + delete mode 100644 test/unit/org/apache/cassandra/cql3/ThriftCompatibilityTest.java + delete mode 100644 test/unit/org/apache/cassandra/db/LegacyCellNameTest.java + delete mode 100644 test/unit/org/apache/cassandra/db/ReadResponseTest.java + delete mode 100644 test/unit/org/apache/cassandra/hadoop/ColumnFamilyInputFormatTest.java + delete mode 100644 test/unit/org/apache/cassandra/io/sstable/format/ClientModeSSTableTest.java + delete mode 100644 test/unit/org/apache/cassandra/schema/LegacySchemaMigratorTest.java + delete mode 100644 test/unit/org/apache/cassandra/service/EmbeddedCassandraServiceTest.java + delete mode 100644 tools/stress/src/org/apache/cassandra/stress/operations/predefined/ThriftCounterAdder.java + delete mode 100644 tools/stress/src/org/apache/cassandra/stress/operations/predefined/ThriftCounterGetter.java + delete mode 100644 tools/stress/src/org/apache/cassandra/stress/operations/predefined/ThriftInserter.java + delete mode 100644 tools/stress/src/org/apache/cassandra/stress/operations/predefined/ThriftReader.java + delete mode 100644 tools/stress/src/org/apache/cassandra/stress/util/SimpleThriftClient.java + delete mode 100644 tools/stress/src/org/apache/cassandra/stress/util/SmartThriftClient.java + delete mode 100644 tools/stress/src/org/apache/cassandra/stress/util/ThriftClient.java + +diff --git a/NOTICE.txt b/NOTICE.txt +index 1c552fc..789d334 100644 +--- a/NOTICE.txt ++++ b/NOTICE.txt +@@ 
-46,13 +46,6 @@ Written by Adrien Grand. + Contains bindings to the C LZ4 implementation (http://code.google.com/p/lz4/) + Copyright (C) 2011-2012, Yann Collet. + +-Alternative Disruptor backed thrift server from https://github.com/xedin/disruptor_thrift_server +-Written by Pavel Yaskevich. +- +-LMAX Disruptor +-(http://lmax-exchange.github.io/disruptor/) +-Copyright 2011 LMAX Ltd. +- + Airline + (https://github.com/airlift/airline) + Copyright 2011, Dain Sundstrom dain@iq80.com +diff --git a/bin/cassandra.ps1 b/bin/cassandra.ps1 +index 5d10994..d968793 100644 +--- a/bin/cassandra.ps1 ++++ b/bin/cassandra.ps1 +@@ -298,9 +298,8 @@ Function VerifyPortsAreAvailable + # storage_port + # ssl_storage_port + # native_transport_port +- # rpc_port, which we'll match to rpc_address + # and from env: JMX_PORT which we cache in our environment during SetCassandraEnvironment for this check +- $yamlRegex = "storage_port:|ssl_storage_port:|native_transport_port:|rpc_port" ++ $yamlRegex = "storage_port:|ssl_storage_port:|native_transport_port:" + $yaml = Get-Content "$env:CASSANDRA_CONF\cassandra.yaml" + $portRegex = ":$env:JMX_PORT |" + +diff --git a/bin/cqlsh.py b/bin/cqlsh.py +index ce85449..fd635b1 100644 +--- a/bin/cqlsh.py ++++ b/bin/cqlsh.py +@@ -104,7 +104,7 @@ elif webbrowser._tryorder[0] == 'xdg-open' and os.environ.get('XDG_DATA_DIRS', ' + webbrowser._tryorder.remove('xdg-open') + webbrowser._tryorder.append('xdg-open') + +-# use bundled libs for python-cql and thrift, if available. if there ++# use bundled lib for python-cql if available. if there + # is a ../lib dir, use bundled libs there preferentially. + ZIPLIB_DIRS = [os.path.join(CASSANDRA_PATH, 'lib')] + myplatform = platform.system() +@@ -1914,8 +1914,8 @@ class Shell(cmd.Cmd): + SHOW VERSION + + Shows the version and build of the connected Cassandra instance, as +- well as the versions of the CQL spec and the Thrift protocol that +- the connected Cassandra instance understands. 
++ well as the version of the CQL spec that the connected Cassandra ++ instance understands. + + SHOW HOST + +diff --git a/build.xml b/build.xml +index ed40eb6..d9eaa01 100644 +--- a/build.xml ++++ b/build.xml +@@ -44,12 +44,9 @@ + + + +- + + + +- +- + + + +@@ -154,7 +151,6 @@ + + + +- + + + +@@ -186,7 +182,6 @@ + + +- + + + +@@ -386,14 +381,7 @@ + + + +- +- +- +- + +- +- +- + + + +@@ -421,7 +409,6 @@ + + + +- + + + +@@ -583,15 +570,11 @@ + + + +- + + + + + +- +- +- + + + +@@ -617,26 +600,6 @@ + + + +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- + + + +- +- +- +- +- +- Generating Thrift Java code from ${basedir}/interface/cassandra.thrift... +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- Generating Thrift Python code from ${basedir}/interface/cassandra.thrift... +- +- +- +- +- +- +- +- +- +- +- +- + + + +@@ -808,13 +723,6 @@ + + +- +- +- +- + +@@ -836,7 +744,6 @@ + + + +- + + + +@@ -858,8 +765,6 @@ + + + +- + + +@@ -876,35 +781,14 @@ + depends="build, build-test, stress-build, write-poms" + description="Assemble Cassandra JAR files"> + +- + +- + +- +- +- +- +- +- +- +- +- +- +- +- +- +- + + + + +- +- + + + +@@ -914,7 +798,7 @@ + + ++ value="${ant.project.name}-clientutil-${version}.jar" /> + + + +@@ -957,16 +841,6 @@ + --> + + +- +- +- +- +- +- +- +- +- + + + +@@ -1001,11 +875,6 @@ + The sources-jar target makes cassandra-sources.jar output required for publishing to Maven central repository. 
+ --> + +- +- +- +- +- + + + +@@ -1036,7 +905,6 @@ + + + +- + + + +@@ -1060,11 +928,6 @@ + + + +- +- +- +- +- + + + +@@ -1295,8 +1158,6 @@ + + + +- +- + + + +@@ -1644,9 +1505,6 @@ + + + +- +- +- + + + +@@ -1824,14 +1682,12 @@ + + + +- + + + + + + +- + + + ]]> +@@ -1944,16 +1800,6 @@ + packaging="tar.gz" + classifier="src"/> + +- +- +- +- +- + + +@@ -1998,16 +1844,6 @@ + packaging="tar.gz" + classifier="src"/> + +- +- +- +- +- + + +diff --git a/conf/cassandra-env.ps1 b/conf/cassandra-env.ps1 +index 9e2f50d..d6b4f34 100644 +--- a/conf/cassandra-env.ps1 ++++ b/conf/cassandra-env.ps1 +@@ -47,7 +47,7 @@ Function BuildClassPath + } + + # Add build/classes/main so it works in development +- $cp = $cp + ";" + """$env:CASSANDRA_HOME\build\classes\main"";""$env:CASSANDRA_HOME\build\classes\thrift""" ++ $cp = $cp + ";" + """$env:CASSANDRA_HOME\build\classes\main""" + $env:CLASSPATH=$cp + } + +diff --git a/conf/cassandra.yaml b/conf/cassandra.yaml +index aaabc2b..8f3897f 100644 +--- a/conf/cassandra.yaml ++++ b/conf/cassandra.yaml +@@ -1,4 +1,4 @@ +-# Cassandra storage config YAML ++assandra storage config YAML + + # NOTE: + # See http://wiki.apache.org/cassandra/StorageConfiguration for +@@ -232,10 +232,10 @@ disk_failure_policy: stop + # Policy for commit disk failures: + # + # die +-# shut down gossip and Thrift and kill the JVM, so the node can be replaced. ++# shut down the node and kill the JVM, so the node can be replaced. + # + # stop +-# shut down gossip and Thrift, leaving the node effectively dead, but ++# shut down the node, leaving the node effectively dead, but + # can still be inspected via JMX. + # + # stop_commit +@@ -265,15 +265,6 @@ commit_failure_policy: stop + # Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater + prepared_statements_cache_size_mb: + +-# Maximum size of the Thrift prepared statement cache +-# +-# If you do not use Thrift at all, it is safe to leave this value at "auto". 
+-# +-# See description of 'prepared_statements_cache_size_mb' above for more information. +-# +-# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater +-thrift_prepared_statements_cache_size_mb: +- + # Maximum size of the key cache in memory. + # + # Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the +@@ -601,8 +592,7 @@ listen_address: localhost + # internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator + + # Whether to start the native transport server. +-# Please note that the address on which the native transport is bound is the +-# same as the rpc_address. The port however is different and specified below. ++# The address on which the native transport is bound is defined by rpc_address. + start_native_transport: true + # port for the CQL native transport to listen for clients on + # For security reasons, you should not expose this port to the internet. Firewall it if needed. +@@ -615,10 +605,8 @@ native_transport_port: 9042 + # from native_transport_port will use encryption for native_transport_port_ssl while + # keeping native_transport_port unencrypted. + # native_transport_port_ssl: 9142 +-# The maximum threads for handling requests when the native transport is used. +-# This is similar to rpc_max_threads though the default differs slightly (and +-# there is no native_transport_min_threads, idle threads will always be stopped +-# after 30 seconds). ++# The maximum threads for handling requests (note that idle threads are stopped ++# after 30 seconds so there is not corresponding minimum setting). + # native_transport_max_threads: 128 + # + # The maximum size of allowed frame. Frame (requests) larger than this will +@@ -634,11 +622,7 @@ native_transport_port: 9042 + # The default is -1, which means unlimited. + # native_transport_max_concurrent_connections_per_ip: -1 + +-# Whether to start the thrift rpc server. 
+-start_rpc: false +- +-# The address or interface to bind the Thrift RPC service and native transport +-# server to. ++# The address or interface to bind the native transport server to. + # + # Set rpc_address OR rpc_interface, not both. + # +@@ -661,9 +645,6 @@ rpc_address: localhost + # ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. + # rpc_interface_prefer_ipv6: false + +-# port for Thrift to listen for clients on +-rpc_port: 9160 +- + # RPC address to broadcast to drivers and other Cassandra nodes. This cannot + # be set to 0.0.0.0. If left blank, this will be set to the value of + # rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must +@@ -673,45 +654,6 @@ rpc_port: 9160 + # enable or disable keepalive on rpc/native connections + rpc_keepalive: true + +-# Cassandra provides two out-of-the-box options for the RPC Server: +-# +-# sync +-# One thread per thrift connection. For a very large number of clients, memory +-# will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size +-# per thread, and that will correspond to your use of virtual memory (but physical memory +-# may be limited depending on use of stack space). +-# +-# hsha +-# Stands for "half synchronous, half asynchronous." All thrift clients are handled +-# asynchronously using a small number of threads that does not vary with the amount +-# of thrift clients (and thus scales well to many clients). The rpc requests are still +-# synchronous (one thread per active request). If hsha is selected then it is essential +-# that rpc_max_threads is changed from the default value of unlimited. +-# +-# The default is sync because on Windows hsha is about 30% slower. On Linux, +-# sync/hsha performance is about the same, with hsha of course using less memory. +-# +-# Alternatively, can provide your own RPC server by providing the fully-qualified class name +-# of an o.a.c.t.TServerFactory that can create an instance of it. 
+-rpc_server_type: sync +- +-# Uncomment rpc_min|max_thread to set request pool size limits. +-# +-# Regardless of your choice of RPC server (see above), the number of maximum requests in the +-# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync +-# RPC server, it also dictates the number of clients that can be connected at all). +-# +-# The default is unlimited and thus provides no protection against clients overwhelming the server. You are +-# encouraged to set a maximum that makes sense for you in production, but do keep in mind that +-# rpc_max_threads represents the maximum number of client requests this server may execute concurrently. +-# +-# rpc_min_threads: 16 +-# rpc_max_threads: 2048 +- +-# uncomment to set socket buffer sizes on rpc connections +-# rpc_send_buff_size_in_bytes: +-# rpc_recv_buff_size_in_bytes: +- + # Uncomment to set socket buffer size for internode communication + # Note that when setting this, the buffer size is limited by net.core.wmem_max + # and when not setting it it is defined by net.ipv4.tcp_wmem +@@ -728,9 +670,6 @@ rpc_server_type: sync + # and when not setting it it is defined by net.ipv4.tcp_wmem + # internode_recv_buff_size_in_bytes: + +-# Frame size for thrift (maximum message length). +-thrift_framed_transport_size_in_mb: 15 +- + # Set to true to have Cassandra create a hard link to each sstable + # flushed or streamed locally in a backups/ subdirectory of the + # keyspace data. Removing these links is the operator's +@@ -934,52 +873,6 @@ dynamic_snitch_reset_interval_in_ms: 600000 + # until the pinned host was 20% worse than the fastest. + dynamic_snitch_badness_threshold: 0.1 + +-# request_scheduler -- Set this to a class that implements +-# RequestScheduler, which will schedule incoming client requests +-# according to the specific policy. This is useful for multi-tenancy +-# with a single Cassandra cluster. 
+-# NOTE: This is specifically for requests from the client and does +-# not affect inter node communication. +-# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place +-# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of +-# client requests to a node with a separate queue for each +-# request_scheduler_id. The scheduler is further customized by +-# request_scheduler_options as described below. +-request_scheduler: org.apache.cassandra.scheduler.NoScheduler +- +-# Scheduler Options vary based on the type of scheduler +-# +-# NoScheduler +-# Has no options +-# +-# RoundRobin +-# throttle_limit +-# The throttle_limit is the number of in-flight +-# requests per client. Requests beyond +-# that limit are queued up until +-# running requests can complete. +-# The value of 80 here is twice the number of +-# concurrent_reads + concurrent_writes. +-# default_weight +-# default_weight is optional and allows for +-# overriding the default which is 1. +-# weights +-# Weights are optional and will default to 1 or the +-# overridden default_weight. The weight translates into how +-# many requests are handled during each turn of the +-# RoundRobin, based on the scheduler id. +-# +-# request_scheduler_options: +-# throttle_limit: 80 +-# default_weight: 5 +-# weights: +-# Keyspace1: 1 +-# Keyspace2: 5 +- +-# request_scheduler_id -- An identifier based on which to perform +-# the request scheduling. Currently the only valid option is keyspace. +-# request_scheduler_id: keyspace +- + # Enable or disable inter-node encryption + # JVM defaults for supported SSL socket protocols and cipher suites can + # be replaced using custom encryption options. This is not recommended +diff --git a/conf/jvm.options b/conf/jvm.options +index 692d06b..6ccccd3 100644 +--- a/conf/jvm.options ++++ b/conf/jvm.options +@@ -53,18 +53,12 @@ + # before joining the ring. 
+ #-Dcassandra.ring_delay_ms=ms + +-# Set the port for the Thrift RPC service, which is used for client connections. (Default: 9160) +-#-Dcassandra.rpc_port=port +- + # Set the SSL port for encrypted communication. (Default: 7001) + #-Dcassandra.ssl_storage_port=port + + # Enable or disable the native transport server. See start_native_transport in cassandra.yaml. + # cassandra.start_native_transport=true|false + +-# Enable or disable the Thrift RPC server. (Default: true) +-#-Dcassandra.start_rpc=true/false +- + # Set the port for inter-node communication. (Default: 7000) + #-Dcassandra.storage_port=port + +diff --git a/conf/logback.xml b/conf/logback.xml +index a47740d..085bf06 100644 +--- a/conf/logback.xml ++++ b/conf/logback.xml +@@ -97,5 +97,4 @@ appender reference in the root level section below. + + + +- + +diff --git a/doc/SASI.md b/doc/SASI.md +index a4762c9..7c530b1 100644 +--- a/doc/SASI.md ++++ b/doc/SASI.md +@@ -19,8 +19,7 @@ implementation. + ## Using SASI + + The examples below walk through creating a table and indexes on its +-columns, and performing queries on some inserted data. The patchset in +-this repository includes support for the Thrift and CQL3 interfaces. ++columns, and performing queries on some inserted data. + + The examples below assume the `demo` keyspace has been created and is + in use. +diff --git a/doc/convert_yaml_to_rst.py b/doc/convert_yaml_to_rst.py +index fee6d8c..3c95da2 100644 +--- a/doc/convert_yaml_to_rst.py ++++ b/doc/convert_yaml_to_rst.py +@@ -41,7 +41,6 @@ commented_re = re.compile(r"^# ?(.*)") + # that these can be commented out (making it useless to use a yaml parser). + COMPLEX_OPTIONS = ( + 'seed_provider', +- 'request_scheduler_options', + 'data_file_directories', + 'commitlog_compression', + 'hints_compression', +diff --git a/doc/source/cql/ddl.rst b/doc/source/cql/ddl.rst +index 7f3431a..918abb5 100644 +--- a/doc/source/cql/ddl.rst ++++ b/doc/source/cql/ddl.rst +@@ -396,15 +396,13 @@ Compact tables + + .. 
warning:: Since Cassandra 3.0, compact tables have the exact same layout internally than non compact ones (for the + same schema obviously), and declaring a table compact **only** creates artificial limitations on the table definition +- and usage that are necessary to ensure backward compatibility with the deprecated Thrift API. And as ``COMPACT ++ and usage. It only exists for historical reason and is preserved for backward compatibility And as ``COMPACT + STORAGE`` cannot, as of Cassandra |version|, be removed, it is strongly discouraged to create new table with the + ``COMPACT STORAGE`` option. + +-A *compact* table is one defined with the ``COMPACT STORAGE`` option. This option is mainly targeted towards backward +-compatibility for definitions created before CQL version 3 (see `www.datastax.com/dev/blog/thrift-to-cql3 +-`__ for more details) and shouldn't be used for new tables. Declaring a +-table with this option creates limitations for the table which are largely arbitrary but necessary for backward +-compatibility with the (deprecated) Thrift API. Amongst those limitation: ++A *compact* table is one defined with the ``COMPACT STORAGE`` option. This option is only maintained for backward ++compatibility for definitions created before CQL version 3 and shouldn't be used for new tables. Declaring a ++table with this option creates limitations for the table which are largely arbitrary (and exist for historical reasons). Amongst those limitation: + + - a compact table cannot use collections nor static columns. + - if a compact table has at least one clustering column, then it must have *exactly* one column outside of the primary +diff --git a/doc/source/cql/index.rst b/doc/source/cql/index.rst +index 00d90e4..e6fc99b 100644 +--- a/doc/source/cql/index.rst ++++ b/doc/source/cql/index.rst +@@ -24,8 +24,7 @@ the languages. 
However, the `changes <#changes>`_ section provides the diff betw + + CQL offers a model close to SQL in the sense that data is put in *tables* containing *rows* of *columns*. For + that reason, when used in this document, these terms (tables, rows and columns) have the same definition than they have +-in SQL. But please note that as such, they do **not** refer to the concept of rows and columns found in the deprecated +-thrift API (and earlier version 1 and 2 of CQL). ++in SQL. + + .. toctree:: + :maxdepth: 2 +diff --git a/doc/source/operating/metrics.rst b/doc/source/operating/metrics.rst +index 6df1b71..3a9728b 100644 +--- a/doc/source/operating/metrics.rst ++++ b/doc/source/operating/metrics.rst +@@ -582,7 +582,6 @@ Reported name format: + Name Type Description + =========================== ============== =========== + connectedNativeClients Counter Number of clients connected to this nodes native protocol server +-connectedThriftClients Counter Number of clients connected to this nodes thrift protocol server + =========================== ============== =========== + + JVM Metrics +diff --git a/examples/hadoop_cql3_word_count/README.txt b/examples/hadoop_cql3_word_count/README.txt +deleted file mode 100644 +index b6ee33f..0000000 +--- a/examples/hadoop_cql3_word_count/README.txt ++++ /dev/null +@@ -1,51 +0,0 @@ +-Introduction +-============ +- +-WordCount hadoop example: Inserts a bunch of words across multiple rows, +-and counts them, with RandomPartitioner. The word_count_counters example sums +-the value of counter columns for a key. +- +-The scripts in bin/ assume you are running with cwd of examples/word_count. +- +- +-Running +-======= +- +-First build and start a Cassandra server with the default configuration*. Ensure that the Thrift +-interface is enabled, either by setting start_rpc:true in cassandra.yaml or by running +-`nodetool enablethrift` after startup. 
+-Once Cassandra has started and the Thrift interface is available, run +- +-contrib/word_count$ ant +-contrib/word_count$ bin/word_count_setup +-contrib/word_count$ bin/word_count +-contrib/word_count$ bin/word_count_counters +- +-In order to view the results in Cassandra, one can use bin/cqlsh and +-perform the following operations: +-$ bin/cqlsh localhost +-> use cql3_wordcount; +-> select * from output_words; +- +-The output of the word count can now be configured. In the bin/word_count +-file, you can specify the OUTPUT_REDUCER. The two options are 'filesystem' +-and 'cassandra'. The filesystem option outputs to the /tmp/word_count* +-directories. The cassandra option outputs to the 'output_words' column family +-in the 'cql3_wordcount' keyspace. 'cassandra' is the default. +- +-Read the code in src/ for more details. +- +-The word_count_counters example sums the counter columns for a row. The output +-is written to a text file in /tmp/word_count_counters. +- +-*It is recommended to turn off vnodes when running Cassandra with hadoop. +-This is done by setting "num_tokens: 1" in cassandra.yaml. If you want to +-point wordcount at a real cluster, modify the seed and listenaddress +-settings accordingly. +- +- +-Troubleshooting +-=============== +- +-word_count uses conf/logback.xml to log to wc.out. +- +diff --git a/examples/hadoop_cql3_word_count/bin/word_count b/examples/hadoop_cql3_word_count/bin/word_count +deleted file mode 100755 +index 76cca7d..0000000 +--- a/examples/hadoop_cql3_word_count/bin/word_count ++++ /dev/null +@@ -1,62 +0,0 @@ +-#!/bin/sh +- +-# Licensed to the Apache Software Foundation (ASF) under one +-# or more contributor license agreements. See the NOTICE file +-# distributed with this work for additional information +-# regarding copyright ownership. The ASF licenses this file +-# to you under the Apache License, Version 2.0 (the +-# "License"); you may not use this file except in compliance +-# with the License. 
You may obtain a copy of the License at +-# +-# http://www.apache.org/licenses/LICENSE-2.0 +-# +-# Unless required by applicable law or agreed to in writing, software +-# distributed under the License is distributed on an "AS IS" BASIS, +-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-# See the License for the specific language governing permissions and +-# limitations under the License. +- +-cwd=`dirname $0` +- +-# Cassandra class files. +-if [ ! -d $cwd/../../../build/classes/main ]; then +- echo "Unable to locate cassandra class files" >&2 +- exit 1 +-fi +- +-# word_count Jar. +-if [ ! -e $cwd/../build/word_count.jar ]; then +- echo "Unable to locate word_count jar" >&2 +- exit 1 +-fi +- +-CLASSPATH=$CLASSPATH:$cwd/../conf +-CLASSPATH=$CLASSPATH:$cwd/../build/word_count.jar +-CLASSPATH=$CLASSPATH:$cwd/../../../build/classes/main +-CLASSPATH=$CLASSPATH:$cwd/../../../build/classes/thrift +-for jar in $cwd/../build/lib/jars/*.jar; do +- CLASSPATH=$CLASSPATH:$jar +-done +-for jar in $cwd/../../../lib/*.jar; do +- CLASSPATH=$CLASSPATH:$jar +-done +-for jar in $cwd/../../../build/lib/jars/*.jar; do +- CLASSPATH=$CLASSPATH:$jar +-done +- +-if [ -x $JAVA_HOME/bin/java ]; then +- JAVA=$JAVA_HOME/bin/java +-else +- JAVA=`which java` +-fi +- +-if [ "x$JAVA" = "x" ]; then +- echo "Java executable not found (hint: set JAVA_HOME)" >&2 +- exit 1 +-fi +- +-OUTPUT_REDUCER=cassandra +-INPUT_MAPPER=native +- +-#echo $CLASSPATH +-"$JAVA" -Xmx1G -ea -cp "$CLASSPATH" WordCount output_reducer=$OUTPUT_REDUCER input_mapper=$INPUT_MAPPER +diff --git a/examples/hadoop_cql3_word_count/bin/word_count_counters b/examples/hadoop_cql3_word_count/bin/word_count_counters +deleted file mode 100755 +index cc1243f..0000000 +--- a/examples/hadoop_cql3_word_count/bin/word_count_counters ++++ /dev/null +@@ -1,61 +0,0 @@ +-#!/bin/sh +- +-# Licensed to the Apache Software Foundation (ASF) under one +-# or more contributor license agreements. 
See the NOTICE file +-# distributed with this work for additional information +-# regarding copyright ownership. The ASF licenses this file +-# to you under the Apache License, Version 2.0 (the +-# "License"); you may not use this file except in compliance +-# with the License. You may obtain a copy of the License at +-# +-# http://www.apache.org/licenses/LICENSE-2.0 +-# +-# Unless required by applicable law or agreed to in writing, software +-# distributed under the License is distributed on an "AS IS" BASIS, +-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-# See the License for the specific language governing permissions and +-# limitations under the License. +- +-cwd=`dirname $0` +- +-# Cassandra class files. +-if [ ! -d $cwd/../../../build/classes/main ]; then +- echo "Unable to locate cassandra class files" >&2 +- exit 1 +-fi +- +-# word_count Jar. +-if [ ! -e $cwd/../build/word_count.jar ]; then +- echo "Unable to locate word_count jar" >&2 +- exit 1 +-fi +- +-CLASSPATH=$CLASSPATH:$cwd/../conf +-CLASSPATH=$CLASSPATH:$cwd/../build/word_count.jar +-CLASSPATH=$CLASSPATH:$cwd/../../../build/classes/main +-CLASSPATH=$CLASSPATH:$cwd/../../../build/classes/thrift +-for jar in $cwd/../build/lib/jars/*.jar; do +- CLASSPATH=$CLASSPATH:$jar +-done +-for jar in $cwd/../../../lib/*.jar; do +- CLASSPATH=$CLASSPATH:$jar +-done +-for jar in $cwd/../../../build/lib/jars/*.jar; do +- CLASSPATH=$CLASSPATH:$jar +-done +- +-if [ -x $JAVA_HOME/bin/java ]; then +- JAVA=$JAVA_HOME/bin/java +-else +- JAVA=`which java` +-fi +- +-if [ "x$JAVA" = "x" ]; then +- echo "Java executable not found (hint: set JAVA_HOME)" >&2 +- exit 1 +-fi +- +-INPUT_MAPPER=native +- +-#echo $CLASSPATH +-"$JAVA" -Xmx1G -ea -cp "$CLASSPATH" WordCountCounters input_mapper=$INPUT_MAPPER +diff --git a/examples/hadoop_cql3_word_count/bin/word_count_setup b/examples/hadoop_cql3_word_count/bin/word_count_setup +deleted file mode 100755 +index 6e5650f..0000000 +--- 
a/examples/hadoop_cql3_word_count/bin/word_count_setup ++++ /dev/null +@@ -1,61 +0,0 @@ +-#!/bin/sh +- +-# Licensed to the Apache Software Foundation (ASF) under one +-# or more contributor license agreements. See the NOTICE file +-# distributed with this work for additional information +-# regarding copyright ownership. The ASF licenses this file +-# to you under the Apache License, Version 2.0 (the +-# "License"); you may not use this file except in compliance +-# with the License. You may obtain a copy of the License at +-# +-# http://www.apache.org/licenses/LICENSE-2.0 +-# +-# Unless required by applicable law or agreed to in writing, software +-# distributed under the License is distributed on an "AS IS" BASIS, +-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-# See the License for the specific language governing permissions and +-# limitations under the License. +- +-cwd=`dirname $0` +- +-# Cassandra class files. +-if [ ! -d $cwd/../../../build/classes/main ]; then +- echo "Unable to locate cassandra class files" >&2 +- exit 1 +-fi +- +-# word_count Jar. +-if [ ! 
-e $cwd/../build/word_count.jar ]; then +- echo "Unable to locate word_count jar" >&2 +- exit 1 +-fi +- +-CLASSPATH=$CLASSPATH:$cwd/../build/word_count.jar +-CLASSPATH=$CLASSPATH:.:$cwd/../../../build/classes/main +-CLASSPATH=$CLASSPATH:.:$cwd/../../../build/classes/thrift +-for jar in $cwd/../build/lib/jars/*.jar; do +- CLASSPATH=$CLASSPATH:$jar +-done +-for jar in $cwd/../../../lib/*.jar; do +- CLASSPATH=$CLASSPATH:$jar +-done +-for jar in $cwd/../../../build/lib/jars/*.jar; do +- CLASSPATH=$CLASSPATH:$jar +-done +- +-if [ -x $JAVA_HOME/bin/java ]; then +- JAVA=$JAVA_HOME/bin/java +-else +- JAVA=`which java` +-fi +- +-if [ "x$JAVA" = "x" ]; then +- echo "Java executable not found (hint: set JAVA_HOME)" >&2 +- exit 1 +-fi +- +-HOST=localhost +-PORT=9160 +-FRAMED=true +- +-"$JAVA" -Xmx1G -ea -Dcassandra.host=$HOST -Dcassandra.port=$PORT -Dcassandra.framed=$FRAMED -cp "$CLASSPATH" WordCountSetup +diff --git a/examples/hadoop_cql3_word_count/build.xml b/examples/hadoop_cql3_word_count/build.xml +deleted file mode 100644 +index 939e1b3..0000000 +--- a/examples/hadoop_cql3_word_count/build.xml ++++ /dev/null +@@ -1,113 +0,0 @@ +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- Downloading Ivy... 
+- +- +- +- +- +- +- +- +- +- +- +- +- +- +diff --git a/examples/hadoop_cql3_word_count/conf/logback.xml b/examples/hadoop_cql3_word_count/conf/logback.xml +deleted file mode 100644 +index 443bd1c..0000000 +--- a/examples/hadoop_cql3_word_count/conf/logback.xml ++++ /dev/null +@@ -1,42 +0,0 @@ +- +- +- +- +- +- +- +- wc.out +- +- %-5level [%thread] %date{ISO8601} %F:%L - %msg%n +- +- +- +- +- +- %-5level %date{HH:mm:ss,SSS} %msg%n +- +- +- +- +- +- +- +- +- +diff --git a/examples/hadoop_cql3_word_count/ivy.xml b/examples/hadoop_cql3_word_count/ivy.xml +deleted file mode 100644 +index 2016eb8..0000000 +--- a/examples/hadoop_cql3_word_count/ivy.xml ++++ /dev/null +@@ -1,24 +0,0 @@ +- +- +- +- +- +- +- +diff --git a/examples/hadoop_cql3_word_count/src/WordCount.java b/examples/hadoop_cql3_word_count/src/WordCount.java +deleted file mode 100644 +index bc95736..0000000 +--- a/examples/hadoop_cql3_word_count/src/WordCount.java ++++ /dev/null +@@ -1,259 +0,0 @@ +-/** +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. 
+- */ +- +-import java.io.IOException; +-import java.nio.ByteBuffer; +-import java.util.*; +-import java.util.Map.Entry; +- +-import org.apache.cassandra.hadoop.cql3.CqlConfigHelper; +-import org.apache.cassandra.hadoop.cql3.CqlOutputFormat; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-import org.apache.cassandra.hadoop.cql3.CqlInputFormat; +-import org.apache.cassandra.hadoop.ConfigHelper; +-import org.apache.cassandra.utils.ByteBufferUtil; +-import org.apache.hadoop.conf.Configuration; +-import org.apache.hadoop.conf.Configured; +-import org.apache.hadoop.fs.Path; +-import org.apache.hadoop.io.IntWritable; +-import org.apache.hadoop.io.Text; +-import org.apache.hadoop.mapreduce.Job; +-import org.apache.hadoop.mapreduce.Mapper; +-import org.apache.hadoop.mapreduce.Reducer; +-import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; +-import org.apache.hadoop.util.Tool; +-import org.apache.hadoop.util.ToolRunner; +-import com.datastax.driver.core.Row; +- +-/** +- * This counts the occurrences of words in ColumnFamily +- * cql3_wordcount ( id uuid, +- * line text, +- * PRIMARY KEY (id)) +- * +- * For each word, we output the total number of occurrences across all body texts. 
+- * +- * When outputting to Cassandra, we write the word counts to column family +- * output_words ( word text, +- * count_num text, +- * PRIMARY KEY (word)) +- * as a {word, count} to columns: word, count_num with a row key of "word sum" +- */ +-public class WordCount extends Configured implements Tool +-{ +- private static final Logger logger = LoggerFactory.getLogger(WordCount.class); +- static final String INPUT_MAPPER_VAR = "input_mapper"; +- static final String KEYSPACE = "cql3_wordcount"; +- static final String COLUMN_FAMILY = "inputs"; +- +- static final String OUTPUT_REDUCER_VAR = "output_reducer"; +- static final String OUTPUT_COLUMN_FAMILY = "output_words"; +- +- private static final String OUTPUT_PATH_PREFIX = "/tmp/word_count"; +- private static final String PRIMARY_KEY = "row_key"; +- +- public static void main(String[] args) throws Exception +- { +- // Let ToolRunner handle generic command-line options +- ToolRunner.run(new Configuration(), new WordCount(), args); +- System.exit(0); +- } +- +- public static class TokenizerMapper extends Mapper, Map, Text, IntWritable> +- { +- private final static IntWritable one = new IntWritable(1); +- private Text word = new Text(); +- private ByteBuffer sourceColumn; +- +- protected void setup(org.apache.hadoop.mapreduce.Mapper.Context context) +- throws IOException, InterruptedException +- { +- } +- +- public void map(Map keys, Map columns, Context context) throws IOException, InterruptedException +- { +- for (Entry column : columns.entrySet()) +- { +- if (!"line".equalsIgnoreCase(column.getKey())) +- continue; +- +- String value = ByteBufferUtil.string(column.getValue()); +- +- StringTokenizer itr = new StringTokenizer(value); +- while (itr.hasMoreTokens()) +- { +- word.set(itr.nextToken()); +- context.write(word, one); +- } +- } +- } +- } +- +- public static class NativeTokenizerMapper extends Mapper +- { +- private final static IntWritable one = new IntWritable(1); +- private Text word = new Text(); +- 
private ByteBuffer sourceColumn; +- +- protected void setup(org.apache.hadoop.mapreduce.Mapper.Context context) +- throws IOException, InterruptedException +- { +- } +- +- public void map(Long key, Row row, Context context) throws IOException, InterruptedException +- { +- String value = row.getString("line"); +- logger.debug("read {}:{}={} from {}", key, "line", value, context.getInputSplit()); +- StringTokenizer itr = new StringTokenizer(value); +- while (itr.hasMoreTokens()) +- { +- word.set(itr.nextToken()); +- context.write(word, one); +- } +- } +- } +- +- public static class ReducerToFilesystem extends Reducer +- { +- public void reduce(Text key, Iterable values, Context context) throws IOException, InterruptedException +- { +- int sum = 0; +- for (IntWritable val : values) +- sum += val.get(); +- context.write(key, new IntWritable(sum)); +- } +- } +- +- public static class ReducerToCassandra extends Reducer, List> +- { +- private Map keys; +- private ByteBuffer key; +- protected void setup(org.apache.hadoop.mapreduce.Reducer.Context context) +- throws IOException, InterruptedException +- { +- keys = new LinkedHashMap(); +- } +- +- public void reduce(Text word, Iterable values, Context context) throws IOException, InterruptedException +- { +- int sum = 0; +- for (IntWritable val : values) +- sum += val.get(); +- keys.put("word", ByteBufferUtil.bytes(word.toString())); +- context.write(keys, getBindVariables(word, sum)); +- } +- +- private List getBindVariables(Text word, int sum) +- { +- List variables = new ArrayList(); +- variables.add(ByteBufferUtil.bytes(String.valueOf(sum))); +- return variables; +- } +- } +- +- public int run(String[] args) throws Exception +- { +- String outputReducerType = "filesystem"; +- String inputMapperType = "native"; +- String outputReducer = null; +- String inputMapper = null; +- +- if (args != null) +- { +- if(args[0].startsWith(OUTPUT_REDUCER_VAR)) +- outputReducer = args[0]; +- if(args[0].startsWith(INPUT_MAPPER_VAR)) +- 
inputMapper = args[0]; +- +- if (args.length == 2) +- { +- if(args[1].startsWith(OUTPUT_REDUCER_VAR)) +- outputReducer = args[1]; +- if(args[1].startsWith(INPUT_MAPPER_VAR)) +- inputMapper = args[1]; +- } +- } +- +- if (outputReducer != null) +- { +- String[] s = outputReducer.split("="); +- if (s != null && s.length == 2) +- outputReducerType = s[1]; +- } +- logger.info("output reducer type: " + outputReducerType); +- if (inputMapper != null) +- { +- String[] s = inputMapper.split("="); +- if (s != null && s.length == 2) +- inputMapperType = s[1]; +- } +- Job job = new Job(getConf(), "wordcount"); +- job.setJarByClass(WordCount.class); +- +- if (outputReducerType.equalsIgnoreCase("filesystem")) +- { +- job.setCombinerClass(ReducerToFilesystem.class); +- job.setReducerClass(ReducerToFilesystem.class); +- job.setOutputKeyClass(Text.class); +- job.setOutputValueClass(IntWritable.class); +- FileOutputFormat.setOutputPath(job, new Path(OUTPUT_PATH_PREFIX)); +- } +- else +- { +- job.setReducerClass(ReducerToCassandra.class); +- +- job.setMapOutputKeyClass(Text.class); +- job.setMapOutputValueClass(IntWritable.class); +- job.setOutputKeyClass(Map.class); +- job.setOutputValueClass(List.class); +- +- job.setOutputFormatClass(CqlOutputFormat.class); +- +- ConfigHelper.setOutputColumnFamily(job.getConfiguration(), KEYSPACE, OUTPUT_COLUMN_FAMILY); +- job.getConfiguration().set(PRIMARY_KEY, "word,sum"); +- String query = "UPDATE " + KEYSPACE + "." + OUTPUT_COLUMN_FAMILY + +- " SET count_num = ? 
"; +- CqlConfigHelper.setOutputCql(job.getConfiguration(), query); +- ConfigHelper.setOutputInitialAddress(job.getConfiguration(), "localhost"); +- ConfigHelper.setOutputPartitioner(job.getConfiguration(), "Murmur3Partitioner"); +- } +- +- if (inputMapperType.equalsIgnoreCase("native")) +- { +- job.setMapperClass(NativeTokenizerMapper.class); +- job.setInputFormatClass(CqlInputFormat.class); +- CqlConfigHelper.setInputCql(job.getConfiguration(), "select * from " + COLUMN_FAMILY + " where token(id) > ? and token(id) <= ? allow filtering"); +- } +- else +- { +- job.setMapperClass(TokenizerMapper.class); +- job.setInputFormatClass(CqlInputFormat.class); +- ConfigHelper.setInputRpcPort(job.getConfiguration(), "9160"); +- } +- +- ConfigHelper.setInputInitialAddress(job.getConfiguration(), "localhost"); +- ConfigHelper.setInputColumnFamily(job.getConfiguration(), KEYSPACE, COLUMN_FAMILY); +- ConfigHelper.setInputPartitioner(job.getConfiguration(), "Murmur3Partitioner"); +- +- CqlConfigHelper.setInputCQLPageRowSize(job.getConfiguration(), "3"); +- job.waitForCompletion(true); +- return 0; +- } +-} +diff --git a/examples/hadoop_cql3_word_count/src/WordCountCounters.java b/examples/hadoop_cql3_word_count/src/WordCountCounters.java +deleted file mode 100644 +index 150d18d..0000000 +--- a/examples/hadoop_cql3_word_count/src/WordCountCounters.java ++++ /dev/null +@@ -1,168 +0,0 @@ +-/** +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. 
You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. +- */ +- +-import java.io.IOException; +-import java.nio.ByteBuffer; +-import java.nio.charset.CharacterCodingException; +-import java.util.*; +- +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-import org.apache.cassandra.hadoop.cql3.CqlConfigHelper; +-import org.apache.cassandra.hadoop.cql3.CqlInputFormat; +-import org.apache.hadoop.conf.Configuration; +-import org.apache.hadoop.conf.Configured; +-import org.apache.hadoop.fs.Path; +-import org.apache.hadoop.io.Text; +-import org.apache.hadoop.io.LongWritable; +-import org.apache.hadoop.mapreduce.Job; +-import org.apache.hadoop.mapreduce.Mapper; +-import org.apache.hadoop.mapreduce.Reducer; +-import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; +-import org.apache.hadoop.util.Tool; +-import org.apache.hadoop.util.ToolRunner; +-import com.datastax.driver.core.Row; +-import org.apache.cassandra.hadoop.ConfigHelper; +-import org.apache.cassandra.utils.ByteBufferUtil; +- +- +-/** +- * This sums the word count stored in the input_words_count ColumnFamily for the key "sum". +- * +- * Output is written to a text file. 
+- */ +-public class WordCountCounters extends Configured implements Tool +-{ +- private static final Logger logger = LoggerFactory.getLogger(WordCountCounters.class); +- +- static final String INPUT_MAPPER_VAR = "input_mapper"; +- static final String COUNTER_COLUMN_FAMILY = "input_words_count"; +- private static final String OUTPUT_PATH_PREFIX = "/tmp/word_count_counters"; +- +- public static void main(String[] args) throws Exception +- { +- // Let ToolRunner handle generic command-line options +- ToolRunner.run(new Configuration(), new WordCountCounters(), args); +- System.exit(0); +- } +- +- public static class SumNativeMapper extends Mapper +- { +- long sum = -1; +- public void map(Long key, Row row, Context context) throws IOException, InterruptedException +- { +- if (sum < 0) +- sum = 0; +- +- logger.debug("read " + key + ":count_num from " + context.getInputSplit()); +- sum += Long.valueOf(row.getString("count_num")); +- } +- +- protected void cleanup(Context context) throws IOException, InterruptedException { +- if (sum > 0) +- context.write(new Text("total_count"), new LongWritable(sum)); +- } +- } +- +- public static class SumMapper extends Mapper, Map, Text, LongWritable> +- { +- long sum = -1; +- +- public void map(Map key, Map columns, Context context) throws IOException, InterruptedException +- { +- if (sum < 0) +- sum = 0; +- +- logger.debug("read " + toString(key) + ":count_num from " + context.getInputSplit()); +- sum += Long.valueOf(ByteBufferUtil.string(columns.get("count_num"))); +- } +- +- protected void cleanup(Context context) throws IOException, InterruptedException { +- if (sum > 0) +- context.write(new Text("total_count"), new LongWritable(sum)); +- } +- +- private String toString(Map keys) +- { +- String result = ""; +- try +- { +- for (ByteBuffer key : keys.values()) +- result = result + ByteBufferUtil.string(key) + ":"; +- } +- catch (CharacterCodingException e) +- { +- logger.error("Failed to print keys", e); +- } +- return result; +- 
} +- } +- +- public static class ReducerToFilesystem extends Reducer +- { +- long sum = 0; +- +- public void reduce(Text key, Iterable values, Context context) throws IOException, InterruptedException +- { +- for (LongWritable val : values) +- sum += val.get(); +- context.write(key, new LongWritable(sum)); +- } +- } +- +- public int run(String[] args) throws Exception +- { +- String inputMapperType = "native"; +- if (args != null && args[0].startsWith(INPUT_MAPPER_VAR)) +- { +- String[] arg0 = args[0].split("="); +- if (arg0 != null && arg0.length == 2) +- inputMapperType = arg0[1]; +- } +- Job job = new Job(getConf(), "wordcountcounters"); +- +- job.setCombinerClass(ReducerToFilesystem.class); +- job.setReducerClass(ReducerToFilesystem.class); +- job.setJarByClass(WordCountCounters.class); +- +- ConfigHelper.setInputInitialAddress(job.getConfiguration(), "localhost"); +- ConfigHelper.setInputPartitioner(job.getConfiguration(), "Murmur3Partitioner"); +- ConfigHelper.setInputColumnFamily(job.getConfiguration(), WordCount.KEYSPACE, WordCount.OUTPUT_COLUMN_FAMILY); +- +- CqlConfigHelper.setInputCQLPageRowSize(job.getConfiguration(), "3"); +- if ("native".equals(inputMapperType)) +- { +- job.setMapperClass(SumNativeMapper.class); +- job.setInputFormatClass(CqlInputFormat.class); +- CqlConfigHelper.setInputCql(job.getConfiguration(), "select * from " + WordCount.OUTPUT_COLUMN_FAMILY + " where token(word) > ? and token(word) <= ? 
allow filtering"); +- } +- else +- { +- job.setMapperClass(SumMapper.class); +- job.setInputFormatClass(CqlInputFormat.class); +- ConfigHelper.setInputRpcPort(job.getConfiguration(), "9160"); +- } +- +- job.setOutputKeyClass(Text.class); +- job.setOutputValueClass(LongWritable.class); +- FileOutputFormat.setOutputPath(job, new Path(OUTPUT_PATH_PREFIX)); +- job.waitForCompletion(true); +- return 0; +- } +-} +diff --git a/examples/hadoop_cql3_word_count/src/WordCountSetup.java b/examples/hadoop_cql3_word_count/src/WordCountSetup.java +deleted file mode 100644 +index e514d63..0000000 +--- a/examples/hadoop_cql3_word_count/src/WordCountSetup.java ++++ /dev/null +@@ -1,181 +0,0 @@ +-/** +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. 
+- */ +- +-import java.nio.ByteBuffer; +-import java.util.*; +- +-import org.apache.cassandra.thrift.*; +-import org.apache.cassandra.utils.ByteBufferUtil; +-import org.apache.thrift.TException; +-import org.apache.thrift.protocol.TBinaryProtocol; +-import org.apache.thrift.protocol.TProtocol; +-import org.apache.thrift.transport.TFramedTransport; +-import org.apache.thrift.transport.TSocket; +-import org.apache.thrift.transport.TTransport; +-import org.apache.thrift.transport.TTransportException; +- +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-public class WordCountSetup +-{ +- private static final Logger logger = LoggerFactory.getLogger(WordCountSetup.class); +- +- public static final int TEST_COUNT = 6; +- +- public static void main(String[] args) throws Exception +- { +- Cassandra.Iface client = createConnection(); +- +- setupKeyspace(client); +- client.set_keyspace(WordCount.KEYSPACE); +- setupTable(client); +- insertData(client); +- +- System.exit(0); +- } +- +- private static void setupKeyspace(Cassandra.Iface client) +- throws InvalidRequestException, +- UnavailableException, +- TimedOutException, +- SchemaDisagreementException, +- TException +- { +- KsDef ks; +- try +- { +- ks = client.describe_keyspace(WordCount.KEYSPACE); +- } +- catch(NotFoundException e) +- { +- logger.info("set up keyspace " + WordCount.KEYSPACE); +- String query = "CREATE KEYSPACE " + WordCount.KEYSPACE + +- " WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 1}"; +- +- client.execute_cql3_query(ByteBufferUtil.bytes(query), Compression.NONE, ConsistencyLevel.ONE); +- +- String verifyQuery = "select count(*) from system.peers"; +- CqlResult result = client.execute_cql3_query(ByteBufferUtil.bytes(verifyQuery), Compression.NONE, ConsistencyLevel.ONE); +- +- long magnitude = ByteBufferUtil.toLong(result.rows.get(0).columns.get(0).value); +- try +- { +- Thread.sleep(1000 * magnitude); +- } +- catch (InterruptedException ie) +- { +- throw new 
RuntimeException(ie); +- } +- } +- } +- +- private static void setupTable(Cassandra.Iface client) +- throws InvalidRequestException, +- UnavailableException, +- TimedOutException, +- SchemaDisagreementException, +- TException +- { +- String query = "CREATE TABLE " + WordCount.KEYSPACE + "." + WordCount.COLUMN_FAMILY + +- " ( id uuid," + +- " line text, " + +- " PRIMARY KEY (id) ) "; +- +- try +- { +- logger.info("set up table " + WordCount.COLUMN_FAMILY); +- client.execute_cql3_query(ByteBufferUtil.bytes(query), Compression.NONE, ConsistencyLevel.ONE); +- } +- catch (InvalidRequestException e) +- { +- logger.error("failed to create table " + WordCount.KEYSPACE + "." + WordCount.COLUMN_FAMILY, e); +- } +- +- query = "CREATE TABLE " + WordCount.KEYSPACE + "." + WordCount.OUTPUT_COLUMN_FAMILY + +- " ( word text," + +- " count_num text," + +- " PRIMARY KEY (word) ) "; +- +- try +- { +- logger.info("set up table " + WordCount.OUTPUT_COLUMN_FAMILY); +- client.execute_cql3_query(ByteBufferUtil.bytes(query), Compression.NONE, ConsistencyLevel.ONE); +- } +- catch (InvalidRequestException e) +- { +- logger.error("failed to create table " + WordCount.KEYSPACE + "." 
+ WordCount.OUTPUT_COLUMN_FAMILY, e); +- } +- } +- +- private static Cassandra.Iface createConnection() throws TTransportException +- { +- if (System.getProperty("cassandra.host") == null || System.getProperty("cassandra.port") == null) +- { +- logger.warn("cassandra.host or cassandra.port is not defined, using default"); +- } +- return createConnection(System.getProperty("cassandra.host", "localhost"), +- Integer.valueOf(System.getProperty("cassandra.port", "9160"))); +- } +- +- private static Cassandra.Client createConnection(String host, Integer port) throws TTransportException +- { +- TSocket socket = new TSocket(host, port); +- TTransport trans = new TFramedTransport(socket); +- trans.open(); +- TProtocol protocol = new TBinaryProtocol(trans); +- +- return new Cassandra.Client(protocol); +- } +- +- private static void insertData(Cassandra.Iface client) +- throws InvalidRequestException, +- UnavailableException, +- TimedOutException, +- SchemaDisagreementException, +- TException +- { +- String query = "INSERT INTO " + WordCount.COLUMN_FAMILY + +- "(id, line) " + +- " values (?, ?) 
"; +- CqlPreparedResult result = client.prepare_cql3_query(ByteBufferUtil.bytes(query), Compression.NONE); +- +- String [] body = bodyData(); +- for (int i = 0; i < 5; i++) +- { +- for (int j = 1; j <= 200; j++) +- { +- List values = new ArrayList(); +- values.add(ByteBufferUtil.bytes(UUID.randomUUID())); +- values.add(ByteBufferUtil.bytes(body[i])); +- client.execute_prepared_cql3_query(result.itemId, values, ConsistencyLevel.ONE); +- } +- } +- } +- +- private static String[] bodyData() +- { // Public domain context, source http://en.wikisource.org/wiki/If%E2%80%94 +- return new String[]{ +- "If you can keep your head when all about you", +- "Are losing theirs and blaming it on you", +- "If you can trust yourself when all men doubt you,", +- "But make allowance for their doubting too:", +- "If you can wait and not be tired by waiting," +- }; +- } +-} +diff --git a/ide/idea-iml-file.xml b/ide/idea-iml-file.xml +index f14fe2e..cc90ebf 100644 +--- a/ide/idea-iml-file.xml ++++ b/ide/idea-iml-file.xml +@@ -26,7 +26,6 @@ + + + +- + + + +diff --git a/ide/idea/workspace.xml b/ide/idea/workspace.xml +index 1645d56..d67a341 100644 +--- a/ide/idea/workspace.xml ++++ b/ide/idea/workspace.xml +@@ -155,7 +155,6 @@ + + + + +@@ -180,7 +179,6 @@ + + + + +@@ -291,11 +289,8 @@ + + + +- +- + + +- + + + +@@ -356,4 +351,4 @@ + + + +- +\ No newline at end of file ++ +diff --git a/interface/cassandra.thrift b/interface/cassandra.thrift +deleted file mode 100644 +index f5041c8..0000000 +--- a/interface/cassandra.thrift ++++ /dev/null +@@ -1,945 +0,0 @@ +-#!/usr/local/bin/thrift --java --php --py +-# Licensed to the Apache Software Foundation (ASF) under one +-# or more contributor license agreements. See the NOTICE file +-# distributed with this work for additional information +-# regarding copyright ownership. The ASF licenses this file +-# to you under the Apache License, Version 2.0 (the +-# "License"); you may not use this file except in compliance +-# with the License. 
You may obtain a copy of the License at +-# +-# http://www.apache.org/licenses/LICENSE-2.0 +-# +-# Unless required by applicable law or agreed to in writing, software +-# distributed under the License is distributed on an "AS IS" BASIS, +-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-# See the License for the specific language governing permissions and +-# limitations under the License. +- +-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +-# *** PLEASE REMEMBER TO EDIT THE VERSION CONSTANT WHEN MAKING CHANGES *** +-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +- +-# +-# Interface definition for Cassandra Service +-# +- +-namespace java org.apache.cassandra.thrift +-namespace cpp org.apache.cassandra +-namespace csharp Apache.Cassandra +-namespace py cassandra +-namespace php cassandra +-namespace perl Cassandra +- +-# Thrift.rb has a bug where top-level modules that include modules +-# with the same name are not properly referenced, so we can't do +-# Cassandra::Cassandra::Client. +-namespace rb CassandraThrift +- +-# The API version (NOT the product version), composed as a dot delimited +-# string with major, minor, and patch level components. +-# +-# - Major: Incremented for backward incompatible changes. An example would +-# be changes to the number or disposition of method arguments. +-# - Minor: Incremented for backward compatible changes. An example would +-# be the addition of a new (optional) method. +-# - Patch: Incremented for bug fixes. The patch level should be increased +-# for every edit that doesn't result in a change to major/minor. +-# +-# See the Semantic Versioning Specification (SemVer) http://semver.org. +-# +-# Note that this backwards compatibility is from the perspective of the server, +-# not the client. Cassandra should always be able to talk to older client +-# software, but client software may not be able to talk to older Cassandra +-# instances. 
+-# +-# An effort should be made not to break forward-client-compatibility either +-# (e.g. one should avoid removing obsolete fields from the IDL), but no +-# guarantees in this respect are made by the Cassandra project. +-const string VERSION = "20.1.0" +- +- +-# +-# data structures +-# +- +-/** Basic unit of data within a ColumnFamily. +- * @param name, the name by which this column is set and retrieved. Maximum 64KB long. +- * @param value. The data associated with the name. Maximum 2GB long, but in practice you should limit it to small numbers of MB (since Thrift must read the full value into memory to operate on it). +- * @param timestamp. The timestamp is used for conflict detection/resolution when two columns with same name need to be compared. +- * @param ttl. An optional, positive delay (in seconds) after which the column will be automatically deleted. +- */ +-struct Column { +- 1: required binary name, +- 2: optional binary value, +- 3: optional i64 timestamp, +- 4: optional i32 ttl, +-} +- +-/** A named list of columns. +- * @param name. see Column.name. +- * @param columns. A collection of standard Columns. The columns within a super column are defined in an adhoc manner. +- * Columns within a super column do not have to have matching structures (similarly named child columns). +- */ +-struct SuperColumn { +- 1: required binary name, +- 2: required list columns, +-} +- +-struct CounterColumn { +- 1: required binary name, +- 2: required i64 value +-} +- +-struct CounterSuperColumn { +- 1: required binary name, +- 2: required list columns +-} +- +-/** +- Methods for fetching rows/records from Cassandra will return either a single instance of ColumnOrSuperColumn or a list +- of ColumnOrSuperColumns (get_slice()). If you're looking up a SuperColumn (or list of SuperColumns) then the resulting +- instances of ColumnOrSuperColumn will have the requested SuperColumn in the attribute super_column. 
For queries resulting +- in Columns, those values will be in the attribute column. This change was made between 0.3 and 0.4 to standardize on +- single query methods that may return either a SuperColumn or Column. +- +- If the query was on a counter column family, you will either get a counter_column (instead of a column) or a +- counter_super_column (instead of a super_column) +- +- @param column. The Column returned by get() or get_slice(). +- @param super_column. The SuperColumn returned by get() or get_slice(). +- @param counter_column. The Counterolumn returned by get() or get_slice(). +- @param counter_super_column. The CounterSuperColumn returned by get() or get_slice(). +- */ +-struct ColumnOrSuperColumn { +- 1: optional Column column, +- 2: optional SuperColumn super_column, +- 3: optional CounterColumn counter_column, +- 4: optional CounterSuperColumn counter_super_column +-} +- +- +-# +-# Exceptions +-# (note that internal server errors will raise a TApplicationException, courtesy of Thrift) +-# +- +-/** A specific column was requested that does not exist. */ +-exception NotFoundException { +-} +- +-/** Invalid request could mean keyspace or column family does not exist, required parameters are missing, or a parameter is malformed. +- why contains an associated error message. +-*/ +-exception InvalidRequestException { +- 1: required string why +-} +- +-/** Not all the replicas required could be created and/or read. */ +-exception UnavailableException { +-} +- +-/** RPC timeout was exceeded. either a node failed mid-operation, or load was too high, or the requested op was too large. */ +-exception TimedOutException { +- /** +- * if a write operation was acknowledged by some replicas but not by enough to +- * satisfy the required ConsistencyLevel, the number of successful +- * replies will be given here. In case of atomic_batch_mutate method this field +- * will be set to -1 if the batch was written to the batchlog and to 0 if it wasn't. 
+- */ +- 1: optional i32 acknowledged_by +- +- /** +- * in case of atomic_batch_mutate method this field tells if the batch +- * was written to the batchlog. +- */ +- 2: optional bool acknowledged_by_batchlog +- +- /** +- * for the CAS method, this field tells if we timed out during the paxos +- * protocol, as opposed to during the commit of our update +- */ +- 3: optional bool paxos_in_progress +-} +- +-/** invalid authentication request (invalid keyspace, user does not exist, or credentials invalid) */ +-exception AuthenticationException { +- 1: required string why +-} +- +-/** invalid authorization request (user does not have access to keyspace) */ +-exception AuthorizationException { +- 1: required string why +-} +- +-/** +- * NOTE: This up outdated exception left for backward compatibility reasons, +- * no actual schema agreement validation is done starting from Cassandra 1.2 +- * +- * schemas are not in agreement across all nodes +- */ +-exception SchemaDisagreementException { +-} +- +- +-# +-# service api +-# +-/** +- * The ConsistencyLevel is an enum that controls both read and write +- * behavior based on the ReplicationFactor of the keyspace. The +- * different consistency levels have different meanings, depending on +- * if you're doing a write or read operation. +- * +- * If W + R > ReplicationFactor, where W is the number of nodes to +- * block for on write, and R the number to block for on reads, you +- * will have strongly consistent behavior; that is, readers will +- * always see the most recent write. Of these, the most interesting is +- * to do QUORUM reads and writes, which gives you consistency while +- * still allowing availability in the face of node failures up to half +- * of . Of course if latency is more important than +- * consistency then you can use lower values for either or both. 
+- * +- * Some ConsistencyLevels (ONE, TWO, THREE) refer to a specific number +- * of replicas rather than a logical concept that adjusts +- * automatically with the replication factor. Of these, only ONE is +- * commonly used; TWO and (even more rarely) THREE are only useful +- * when you care more about guaranteeing a certain level of +- * durability, than consistency. +- * +- * Write consistency levels make the following guarantees before reporting success to the client: +- * ANY Ensure that the write has been written once somewhere, including possibly being hinted in a non-target node. +- * ONE Ensure that the write has been written to at least 1 node's commit log and memory table +- * TWO Ensure that the write has been written to at least 2 node's commit log and memory table +- * THREE Ensure that the write has been written to at least 3 node's commit log and memory table +- * QUORUM Ensure that the write has been written to / 2 + 1 nodes +- * LOCAL_ONE Ensure that the write has been written to 1 node within the local datacenter (requires NetworkTopologyStrategy) +- * LOCAL_QUORUM Ensure that the write has been written to / 2 + 1 nodes, within the local datacenter (requires NetworkTopologyStrategy) +- * EACH_QUORUM Ensure that the write has been written to / 2 + 1 nodes in each datacenter (requires NetworkTopologyStrategy) +- * ALL Ensure that the write is written to <ReplicationFactor> nodes before responding to the client. +- * +- * Read consistency levels make the following guarantees before returning successful results to the client: +- * ANY Not supported. You probably want ONE instead. +- * ONE Returns the record obtained from a single replica. +- * TWO Returns the record with the most recent timestamp once two replicas have replied. +- * THREE Returns the record with the most recent timestamp once three replicas have replied. +- * QUORUM Returns the record with the most recent timestamp once a majority of replicas have replied. 
+- * LOCAL_ONE Returns the record with the most recent timestamp once a single replica within the local datacenter have replied. +- * LOCAL_QUORUM Returns the record with the most recent timestamp once a majority of replicas within the local datacenter have replied. +- * EACH_QUORUM Returns the record with the most recent timestamp once a majority of replicas within each datacenter have replied. +- * ALL Returns the record with the most recent timestamp once all replicas have replied (implies no replica may be down).. +-*/ +-enum ConsistencyLevel { +- ONE = 1, +- QUORUM = 2, +- LOCAL_QUORUM = 3, +- EACH_QUORUM = 4, +- ALL = 5, +- ANY = 6, +- TWO = 7, +- THREE = 8, +- SERIAL = 9, +- LOCAL_SERIAL = 10, +- LOCAL_ONE = 11, +-} +- +-/** +- ColumnParent is used when selecting groups of columns from the same ColumnFamily. In directory structure terms, imagine +- ColumnParent as ColumnPath + '/../'. +- +- See also ColumnPath +- */ +-struct ColumnParent { +- 3: required string column_family, +- 4: optional binary super_column, +-} +- +-/** The ColumnPath is the path to a single column in Cassandra. It might make sense to think of ColumnPath and +- * ColumnParent in terms of a directory structure. +- * +- * ColumnPath is used to looking up a single column. +- * +- * @param column_family. The name of the CF of the column being looked up. +- * @param super_column. The super column name. +- * @param column. The column name. +- */ +-struct ColumnPath { +- 3: required string column_family, +- 4: optional binary super_column, +- 5: optional binary column, +-} +- +-/** +- A slice range is a structure that stores basic range, ordering and limit information for a query that will return +- multiple columns. It could be thought of as Cassandra's version of LIMIT and ORDER BY +- +- @param start. The column name to start the slice with. 
This attribute is not required, though there is no default value, +- and can be safely set to '', i.e., an empty byte array, to start with the first column name. Otherwise, it +- must a valid value under the rules of the Comparator defined for the given ColumnFamily. +- @param finish. The column name to stop the slice at. This attribute is not required, though there is no default value, +- and can be safely set to an empty byte array to not stop until 'count' results are seen. Otherwise, it +- must also be a valid value to the ColumnFamily Comparator. +- @param reversed. Whether the results should be ordered in reversed order. Similar to ORDER BY blah DESC in SQL. +- @param count. How many columns to return. Similar to LIMIT in SQL. May be arbitrarily large, but Thrift will +- materialize the whole result into memory before returning it to the client, so be aware that you may +- be better served by iterating through slices by passing the last value of one call in as the 'start' +- of the next instead of increasing 'count' arbitrarily large. +- */ +-struct SliceRange { +- 1: required binary start, +- 2: required binary finish, +- 3: required bool reversed=0, +- 4: required i32 count=100, +-} +- +-/** +- A SlicePredicate is similar to a mathematic predicate (see http://en.wikipedia.org/wiki/Predicate_(mathematical_logic)), +- which is described as "a property that the elements of a set have in common." +- +- SlicePredicate's in Cassandra are described with either a list of column_names or a SliceRange. If column_names is +- specified, slice_range is ignored. +- +- @param column_name. A list of column names to retrieve. This can be used similar to Memcached's "multi-get" feature +- to fetch N known column names. For instance, if you know you wish to fetch columns 'Joe', 'Jack', +- and 'Jim' you can pass those column names as a list to fetch all three at once. +- @param slice_range. A SliceRange describing how to range, order, and/or limit the slice. 
+- */ +-struct SlicePredicate { +- 1: optional list column_names, +- 2: optional SliceRange slice_range, +-} +- +-enum IndexOperator { +- EQ, +- GTE, +- GT, +- LTE, +- LT +-} +- +-struct IndexExpression { +- 1: required binary column_name, +- 2: required IndexOperator op, +- 3: required binary value, +-} +- +-/** +- * @deprecated use a KeyRange with row_filter in get_range_slices instead +- */ +-struct IndexClause { +- 1: required list expressions, +- 2: required binary start_key, +- 3: required i32 count=100, +-} +- +- +-/** +-The semantics of start keys and tokens are slightly different. +-Keys are start-inclusive; tokens are start-exclusive. Token +-ranges may also wrap -- that is, the end token may be less +-than the start one. Thus, a range from keyX to keyX is a +-one-element range, but a range from tokenY to tokenY is the +-full ring. +-*/ +-struct KeyRange { +- 1: optional binary start_key, +- 2: optional binary end_key, +- 3: optional string start_token, +- 4: optional string end_token, +- 6: optional list row_filter, +- 5: required i32 count=100 +-} +- +-/** +- A KeySlice is key followed by the data it maps to. A collection of KeySlice is returned by the get_range_slice operation. +- +- @param key. a row key +- @param columns. List of data represented by the key. Typically, the list is pared down to only the columns specified by +- a SlicePredicate. +- */ +-struct KeySlice { +- 1: required binary key, +- 2: required list columns, +-} +- +-struct KeyCount { +- 1: required binary key, +- 2: required i32 count +-} +- +-/** +- * Note that the timestamp is only optional in case of counter deletion. +- */ +-struct Deletion { +- 1: optional i64 timestamp, +- 2: optional binary super_column, +- 3: optional SlicePredicate predicate, +-} +- +-/** +- A Mutation is either an insert (represented by filling column_or_supercolumn) or a deletion (represented by filling the deletion attribute). +- @param column_or_supercolumn. 
An insert to a column or supercolumn (possibly counter column or supercolumn) +- @param deletion. A deletion of a column or supercolumn +-*/ +-struct Mutation { +- 1: optional ColumnOrSuperColumn column_or_supercolumn, +- 2: optional Deletion deletion, +-} +- +-struct EndpointDetails { +- 1: string host, +- 2: string datacenter, +- 3: optional string rack +-} +- +-struct CASResult { +- 1: required bool success, +- 2: optional list current_values, +-} +- +-/** +- A TokenRange describes part of the Cassandra ring, it is a mapping from a range to +- endpoints responsible for that range. +- @param start_token The first token in the range +- @param end_token The last token in the range +- @param endpoints The endpoints responsible for the range (listed by their configured listen_address) +- @param rpc_endpoints The endpoints responsible for the range (listed by their configured rpc_address) +-*/ +-struct TokenRange { +- 1: required string start_token, +- 2: required string end_token, +- 3: required list endpoints, +- 4: optional list rpc_endpoints +- 5: optional list endpoint_details, +-} +- +-/** +- Authentication requests can contain any data, dependent on the IAuthenticator used +-*/ +-struct AuthenticationRequest { +- 1: required map credentials +-} +- +-enum IndexType { +- KEYS, +- CUSTOM, +- COMPOSITES +-} +- +-/* describes a column in a column family. */ +-struct ColumnDef { +- 1: required binary name, +- 2: required string validation_class, +- 3: optional IndexType index_type, +- 4: optional string index_name, +- 5: optional map index_options +-} +- +-/** +- Describes a trigger. +- `options` should include at least 'class' param. +- Other options are not supported yet. +-*/ +-struct TriggerDef { +- 1: required string name, +- 2: required map options +-} +- +-/* describes a column family. 
*/ +-struct CfDef { +- 1: required string keyspace, +- 2: required string name, +- 3: optional string column_type="Standard", +- 5: optional string comparator_type="BytesType", +- 6: optional string subcomparator_type, +- 8: optional string comment, +- 12: optional double read_repair_chance, +- 13: optional list column_metadata, +- 14: optional i32 gc_grace_seconds, +- 15: optional string default_validation_class, +- 16: optional i32 id, +- 17: optional i32 min_compaction_threshold, +- 18: optional i32 max_compaction_threshold, +- 26: optional string key_validation_class, +- 28: optional binary key_alias, +- 29: optional string compaction_strategy, +- 30: optional map compaction_strategy_options, +- 32: optional map compression_options, +- 33: optional double bloom_filter_fp_chance, +- 34: optional string caching="keys_only", +- 37: optional double dclocal_read_repair_chance = 0.0, +- 39: optional i32 memtable_flush_period_in_ms, +- 40: optional i32 default_time_to_live, +- 42: optional string speculative_retry="NONE", +- 43: optional list triggers, +- 44: optional string cells_per_row_to_cache = "100", +- 45: optional i32 min_index_interval, +- 46: optional i32 max_index_interval, +- +- /* All of the following are now ignored and unsupplied. 
*/ +- +- /** @deprecated */ +- 9: optional double row_cache_size, +- /** @deprecated */ +- 11: optional double key_cache_size, +- /** @deprecated */ +- 19: optional i32 row_cache_save_period_in_seconds, +- /** @deprecated */ +- 20: optional i32 key_cache_save_period_in_seconds, +- /** @deprecated */ +- 21: optional i32 memtable_flush_after_mins, +- /** @deprecated */ +- 22: optional i32 memtable_throughput_in_mb, +- /** @deprecated */ +- 23: optional double memtable_operations_in_millions, +- /** @deprecated */ +- 24: optional bool replicate_on_write, +- /** @deprecated */ +- 25: optional double merge_shards_chance, +- /** @deprecated */ +- 27: optional string row_cache_provider, +- /** @deprecated */ +- 31: optional i32 row_cache_keys_to_save, +- /** @deprecated */ +- 38: optional bool populate_io_cache_on_flush, +- /** @deprecated */ +- 41: optional i32 index_interval, +-} +- +-/* describes a keyspace. */ +-struct KsDef { +- 1: required string name, +- 2: required string strategy_class, +- 3: optional map strategy_options, +- +- /** @deprecated ignored */ +- 4: optional i32 replication_factor, +- +- 5: required list cf_defs, +- 6: optional bool durable_writes=1, +-} +- +-/** CQL query compression */ +-enum Compression { +- GZIP = 1, +- NONE = 2 +-} +- +-enum CqlResultType { +- ROWS = 1, +- VOID = 2, +- INT = 3 +-} +- +-/** +- Row returned from a CQL query. +- +- This struct is used for both CQL2 and CQL3 queries. For CQL2, the partition key +- is special-cased and is always returned. For CQL3, it is not special cased; +- it will be included in the columns list if it was included in the SELECT and +- the key field is always null. 
+-*/ +-struct CqlRow { +- 1: required binary key, +- 2: required list columns +-} +- +-struct CqlMetadata { +- 1: required map name_types, +- 2: required map value_types, +- 3: required string default_name_type, +- 4: required string default_value_type +-} +- +-struct CqlResult { +- 1: required CqlResultType type, +- 2: optional list rows, +- 3: optional i32 num, +- 4: optional CqlMetadata schema +-} +- +-struct CqlPreparedResult { +- 1: required i32 itemId, +- 2: required i32 count, +- 3: optional list variable_types, +- 4: optional list variable_names +-} +- +-/** Represents input splits used by hadoop ColumnFamilyRecordReaders */ +-struct CfSplit { +- 1: required string start_token, +- 2: required string end_token, +- 3: required i64 row_count +-} +- +-/** The ColumnSlice is used to select a set of columns from inside a row. +- * If start or finish are unspecified they will default to the start-of +- * end-of value. +- * @param start. The start of the ColumnSlice inclusive +- * @param finish. The end of the ColumnSlice inclusive +- */ +-struct ColumnSlice { +- 1: optional binary start, +- 2: optional binary finish +-} +- +-/** +- * Used to perform multiple slices on a single row key in one rpc operation +- * @param key. The row key to be multi sliced +- * @param column_parent. The column family (super columns are unsupported) +- * @param column_slices. 0 to many ColumnSlice objects each will be used to select columns +- * @param reversed. Direction of slice +- * @param count. Maximum number of columns +- * @param consistency_level. 
Level to perform the operation at +- */ +-struct MultiSliceRequest { +- 1: optional binary key, +- 2: optional ColumnParent column_parent, +- 3: optional list column_slices, +- 4: optional bool reversed=false, +- 5: optional i32 count=1000, +- 6: optional ConsistencyLevel consistency_level=ConsistencyLevel.ONE +-} +- +-service Cassandra { +- # auth methods +- void login(1: required AuthenticationRequest auth_request) throws (1:AuthenticationException authnx, 2:AuthorizationException authzx), +- +- # set keyspace +- void set_keyspace(1: required string keyspace) throws (1:InvalidRequestException ire), +- +- # retrieval methods +- +- /** +- Get the Column or SuperColumn at the given column_path. If no value is present, NotFoundException is thrown. (This is +- the only method that can throw an exception under non-failure conditions.) +- */ +- ColumnOrSuperColumn get(1:required binary key, +- 2:required ColumnPath column_path, +- 3:required ConsistencyLevel consistency_level=ConsistencyLevel.ONE) +- throws (1:InvalidRequestException ire, 2:NotFoundException nfe, 3:UnavailableException ue, 4:TimedOutException te), +- +- /** +- Get the group of columns contained by column_parent (either a ColumnFamily name or a ColumnFamily/SuperColumn name +- pair) specified by the given SlicePredicate. If no matching values are found, an empty list is returned. +- */ +- list get_slice(1:required binary key, +- 2:required ColumnParent column_parent, +- 3:required SlicePredicate predicate, +- 4:required ConsistencyLevel consistency_level=ConsistencyLevel.ONE) +- throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te), +- +- /** +- returns the number of columns matching predicate for a particular key, +- ColumnFamily and optionally SuperColumn. 
+- */ +- i32 get_count(1:required binary key, +- 2:required ColumnParent column_parent, +- 3:required SlicePredicate predicate, +- 4:required ConsistencyLevel consistency_level=ConsistencyLevel.ONE) +- throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te), +- +- /** +- Performs a get_slice for column_parent and predicate for the given keys in parallel. +- */ +- map> multiget_slice(1:required list keys, +- 2:required ColumnParent column_parent, +- 3:required SlicePredicate predicate, +- 4:required ConsistencyLevel consistency_level=ConsistencyLevel.ONE) +- throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te), +- +- /** +- Perform a get_count in parallel on the given list keys. The return value maps keys to the count found. +- */ +- map multiget_count(1:required list keys, +- 2:required ColumnParent column_parent, +- 3:required SlicePredicate predicate, +- 4:required ConsistencyLevel consistency_level=ConsistencyLevel.ONE) +- throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te), +- +- /** +- returns a subset of columns for a contiguous range of keys. +- */ +- list get_range_slices(1:required ColumnParent column_parent, +- 2:required SlicePredicate predicate, +- 3:required KeyRange range, +- 4:required ConsistencyLevel consistency_level=ConsistencyLevel.ONE) +- throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te), +- +- /** +- returns a range of columns, wrapping to the next rows if necessary to collect max_results. 
+- */ +- list get_paged_slice(1:required string column_family, +- 2:required KeyRange range, +- 3:required binary start_column, +- 4:required ConsistencyLevel consistency_level=ConsistencyLevel.ONE) +- throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te), +- +- /** +- Returns the subset of columns specified in SlicePredicate for the rows matching the IndexClause +- @deprecated use get_range_slices instead with range.row_filter specified +- */ +- list get_indexed_slices(1:required ColumnParent column_parent, +- 2:required IndexClause index_clause, +- 3:required SlicePredicate column_predicate, +- 4:required ConsistencyLevel consistency_level=ConsistencyLevel.ONE) +- throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te), +- +- # modification methods +- +- /** +- * Insert a Column at the given column_parent.column_family and optional column_parent.super_column. +- */ +- void insert(1:required binary key, +- 2:required ColumnParent column_parent, +- 3:required Column column, +- 4:required ConsistencyLevel consistency_level=ConsistencyLevel.ONE) +- throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te), +- +- /** +- * Increment or decrement a counter. +- */ +- void add(1:required binary key, +- 2:required ColumnParent column_parent, +- 3:required CounterColumn column, +- 4:required ConsistencyLevel consistency_level=ConsistencyLevel.ONE) +- throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te), +- +- /** +- * Atomic compare and set. +- * +- * If the cas is successfull, the success boolean in CASResult will be true and there will be no current_values. +- * Otherwise, success will be false and current_values will contain the current values for the columns in +- * expected (that, by definition of compare-and-set, will differ from the values in expected). +- * +- * A cas operation takes 2 consistency level. 
The first one, serial_consistency_level, simply indicates the +- * level of serialization required. This can be either ConsistencyLevel.SERIAL or ConsistencyLevel.LOCAL_SERIAL. +- * The second one, commit_consistency_level, defines the consistency level for the commit phase of the cas. This +- * is a more traditional consistency level (the same CL than for traditional writes are accepted) that impact +- * the visibility for reads of the operation. For instance, if commit_consistency_level is QUORUM, then it is +- * guaranteed that a followup QUORUM read will see the cas write (if that one was successful obviously). If +- * commit_consistency_level is ANY, you will need to use a SERIAL/LOCAL_SERIAL read to be guaranteed to see +- * the write. +- */ +- CASResult cas(1:required binary key, +- 2:required string column_family, +- 3:list expected, +- 4:list updates, +- 5:required ConsistencyLevel serial_consistency_level=ConsistencyLevel.SERIAL, +- 6:required ConsistencyLevel commit_consistency_level=ConsistencyLevel.QUORUM) +- throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te), +- +- /** +- Remove data from the row specified by key at the granularity specified by column_path, and the given timestamp. Note +- that all the values in column_path besides column_path.column_family are truly optional: you can remove the entire +- row by just specifying the ColumnFamily, or you can remove a SuperColumn or a single Column by specifying those levels too. +- */ +- void remove(1:required binary key, +- 2:required ColumnPath column_path, +- 3:required i64 timestamp, +- 4:ConsistencyLevel consistency_level=ConsistencyLevel.ONE) +- throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te), +- +- /** +- * Remove a counter at the specified location. 
+- * Note that counters have limited support for deletes: if you remove a counter, you must wait to issue any following update +- * until the delete has reached all the nodes and all of them have been fully compacted. +- */ +- void remove_counter(1:required binary key, +- 2:required ColumnPath path, +- 3:required ConsistencyLevel consistency_level=ConsistencyLevel.ONE) +- throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te), +- +- /** +- Mutate many columns or super columns for many row keys. See also: Mutation. +- +- mutation_map maps key to column family to a list of Mutation objects to take place at that scope. +- **/ +- void batch_mutate(1:required map>> mutation_map, +- 2:required ConsistencyLevel consistency_level=ConsistencyLevel.ONE) +- throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te), +- +- /** +- Atomically mutate many columns or super columns for many row keys. See also: Mutation. +- +- mutation_map maps key to column family to a list of Mutation objects to take place at that scope. +- **/ +- void atomic_batch_mutate(1:required map>> mutation_map, +- 2:required ConsistencyLevel consistency_level=ConsistencyLevel.ONE) +- throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te), +- +- /** +- Truncate will mark and entire column family as deleted. +- From the user's perspective a successful call to truncate will result complete data deletion from cfname. +- Internally, however, disk space will not be immediatily released, as with all deletes in cassandra, this one +- only marks the data as deleted. +- The operation succeeds only if all hosts in the cluster at available and will throw an UnavailableException if +- some hosts are down. 
+- */ +- void truncate(1:required string cfname) +- throws (1: InvalidRequestException ire, 2: UnavailableException ue, 3: TimedOutException te), +- +- /** +- * Select multiple slices of a key in a single RPC operation +- */ +- list get_multi_slice(1:required MultiSliceRequest request) +- throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te), +- +- // Meta-APIs -- APIs to get information about the node or cluster, +- // rather than user data. The nodeprobe program provides usage examples. +- +- /** +- * for each schema version present in the cluster, returns a list of nodes at that version. +- * hosts that do not respond will be under the key DatabaseDescriptor.INITIAL_VERSION. +- * the cluster is all on the same version if the size of the map is 1. +- */ +- map> describe_schema_versions() +- throws (1: InvalidRequestException ire), +- +- /** list the defined keyspaces in this cluster */ +- list describe_keyspaces() +- throws (1:InvalidRequestException ire), +- +- /** get the cluster name */ +- string describe_cluster_name(), +- +- /** get the thrift api version */ +- string describe_version(), +- +- /** get the token ring: a map of ranges to host addresses, +- represented as a set of TokenRange instead of a map from range +- to list of endpoints, because you can't use Thrift structs as +- map keys: +- https://issues.apache.org/jira/browse/THRIFT-162 +- +- for the same reason, we can't return a set here, even though +- order is neither important nor predictable. 
*/ +- list describe_ring(1:required string keyspace) +- throws (1:InvalidRequestException ire), +- +- +- /** same as describe_ring, but considers only nodes in the local DC */ +- list describe_local_ring(1:required string keyspace) +- throws (1:InvalidRequestException ire), +- +- /** get the mapping between token->node ip +- without taking replication into consideration +- https://issues.apache.org/jira/browse/CASSANDRA-4092 */ +- map describe_token_map() +- throws (1:InvalidRequestException ire), +- +- /** returns the partitioner used by this cluster */ +- string describe_partitioner(), +- +- /** returns the snitch used by this cluster */ +- string describe_snitch(), +- +- /** describe specified keyspace */ +- KsDef describe_keyspace(1:required string keyspace) +- throws (1:NotFoundException nfe, 2:InvalidRequestException ire), +- +- /** experimental API for hadoop/parallel query support. +- may change violently and without warning. +- +- returns list of token strings such that first subrange is (list[0], list[1]], +- next is (list[1], list[2]], etc. */ +- list describe_splits(1:required string cfName, +- 2:required string start_token, +- 3:required string end_token, +- 4:required i32 keys_per_split) +- throws (1:InvalidRequestException ire), +- +- /** Enables tracing for the next query in this connection and returns the UUID for that trace session +- The next query will be traced idependently of trace probability and the returned UUID can be used to query the trace keyspace */ +- binary trace_next_query(), +- +- list describe_splits_ex(1:required string cfName, +- 2:required string start_token, +- 3:required string end_token, +- 4:required i32 keys_per_split) +- throws (1:InvalidRequestException ire), +- +- /** adds a column family. returns the new schema id. */ +- string system_add_column_family(1:required CfDef cf_def) +- throws (1:InvalidRequestException ire, 2:SchemaDisagreementException sde), +- +- /** drops a column family. returns the new schema id. 
*/ +- string system_drop_column_family(1:required string column_family) +- throws (1:InvalidRequestException ire, 2:SchemaDisagreementException sde), +- +- /** adds a keyspace and any column families that are part of it. returns the new schema id. */ +- string system_add_keyspace(1:required KsDef ks_def) +- throws (1:InvalidRequestException ire, 2:SchemaDisagreementException sde), +- +- /** drops a keyspace and any column families that are part of it. returns the new schema id. */ +- string system_drop_keyspace(1:required string keyspace) +- throws (1:InvalidRequestException ire, 2:SchemaDisagreementException sde), +- +- /** updates properties of a keyspace. returns the new schema id. */ +- string system_update_keyspace(1:required KsDef ks_def) +- throws (1:InvalidRequestException ire, 2:SchemaDisagreementException sde), +- +- /** updates properties of a column family. returns the new schema id. */ +- string system_update_column_family(1:required CfDef cf_def) +- throws (1:InvalidRequestException ire, 2:SchemaDisagreementException sde), +- +- +- /** +- * @deprecated Throws InvalidRequestException since 2.2. Please use the CQL3 version instead. +- */ +- CqlResult execute_cql_query(1:required binary query, 2:required Compression compression) +- throws (1:InvalidRequestException ire, +- 2:UnavailableException ue, +- 3:TimedOutException te, +- 4:SchemaDisagreementException sde) +- +- /** +- * Executes a CQL3 (Cassandra Query Language) statement and returns a +- * CqlResult containing the results. +- */ +- CqlResult execute_cql3_query(1:required binary query, 2:required Compression compression, 3:required ConsistencyLevel consistency) +- throws (1:InvalidRequestException ire, +- 2:UnavailableException ue, +- 3:TimedOutException te, +- 4:SchemaDisagreementException sde) +- +- +- /** +- * @deprecated Throws InvalidRequestException since 2.2. Please use the CQL3 version instead. 
+- */ +- CqlPreparedResult prepare_cql_query(1:required binary query, 2:required Compression compression) +- throws (1:InvalidRequestException ire) +- +- /** +- * Prepare a CQL3 (Cassandra Query Language) statement by compiling and returning +- * - the type of CQL statement +- * - an id token of the compiled CQL stored on the server side. +- * - a count of the discovered bound markers in the statement +- */ +- CqlPreparedResult prepare_cql3_query(1:required binary query, 2:required Compression compression) +- throws (1:InvalidRequestException ire) +- +- +- /** +- * @deprecated Throws InvalidRequestException since 2.2. Please use the CQL3 version instead. +- */ +- CqlResult execute_prepared_cql_query(1:required i32 itemId, 2:required list values) +- throws (1:InvalidRequestException ire, +- 2:UnavailableException ue, +- 3:TimedOutException te, +- 4:SchemaDisagreementException sde) +- +- /** +- * Executes a prepared CQL3 (Cassandra Query Language) statement by passing an id token, a list of variables +- * to bind, and the consistency level, and returns a CqlResult containing the results. +- */ +- CqlResult execute_prepared_cql3_query(1:required i32 itemId, 2:required list values, 3:required ConsistencyLevel consistency) +- throws (1:InvalidRequestException ire, +- 2:UnavailableException ue, +- 3:TimedOutException te, +- 4:SchemaDisagreementException sde) +- +- /** +- * @deprecated This is now a no-op. Please use the CQL3 specific methods instead. 
+- */ +- void set_cql_version(1: required string version) throws (1:InvalidRequestException ire) +-} +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/AuthenticationException.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/AuthenticationException.java +deleted file mode 100644 +index b16c400..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/AuthenticationException.java ++++ /dev/null +@@ -1,413 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. 
+- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * invalid authentication request (invalid keyspace, user does not exist, or credentials invalid) +- */ +-public class AuthenticationException extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AuthenticationException"); +- +- private static final org.apache.thrift.protocol.TField WHY_FIELD_DESC = new org.apache.thrift.protocol.TField("why", org.apache.thrift.protocol.TType.STRING, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new AuthenticationExceptionStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new AuthenticationExceptionTupleSchemeFactory()); +- } +- +- public String why; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- WHY((short)1, "why"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // WHY +- return WHY; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.WHY, new org.apache.thrift.meta_data.FieldMetaData("why", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AuthenticationException.class, metaDataMap); +- } +- +- public AuthenticationException() { +- } +- +- public AuthenticationException( +- String why) +- { +- this(); +- this.why = why; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public AuthenticationException(AuthenticationException other) { +- if (other.isSetWhy()) { +- this.why = other.why; +- } +- } +- +- public AuthenticationException deepCopy() { +- return new AuthenticationException(this); +- } +- +- @Override +- public void clear() { +- this.why = null; +- } +- +- public String getWhy() { +- return this.why; +- } +- +- public AuthenticationException setWhy(String why) { +- this.why = why; +- return this; +- } +- +- public void unsetWhy() { +- this.why = null; +- } +- +- /** Returns true if field why is set (has been assigned a value) and false otherwise */ +- public boolean isSetWhy() { +- return this.why != null; +- } +- +- public void setWhyIsSet(boolean value) { +- if (!value) { +- this.why = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case WHY: +- if (value == null) { +- unsetWhy(); +- } else { +- setWhy((String)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case WHY: +- return getWhy(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case WHY: +- return isSetWhy(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof AuthenticationException) +- return this.equals((AuthenticationException)that); +- return false; +- } +- +- public boolean 
equals(AuthenticationException that) { +- if (that == null) +- return false; +- +- boolean this_present_why = true && this.isSetWhy(); +- boolean that_present_why = true && that.isSetWhy(); +- if (this_present_why || that_present_why) { +- if (!(this_present_why && that_present_why)) +- return false; +- if (!this.why.equals(that.why)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_why = true && (isSetWhy()); +- builder.append(present_why); +- if (present_why) +- builder.append(why); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(AuthenticationException other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetWhy()).compareTo(other.isSetWhy()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetWhy()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.why, other.why); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("AuthenticationException("); +- boolean first = true; +- +- sb.append("why:"); +- if (this.why == null) { +- sb.append("null"); +- } else { +- sb.append(this.why); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() 
throws org.apache.thrift.TException { +- // check for required fields +- if (why == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'why' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class AuthenticationExceptionStandardSchemeFactory implements SchemeFactory { +- public AuthenticationExceptionStandardScheme getScheme() { +- return new AuthenticationExceptionStandardScheme(); +- } +- } +- +- private static class AuthenticationExceptionStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, AuthenticationException struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // WHY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.why = iprot.readString(); +- struct.setWhyIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- 
iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, AuthenticationException struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.why != null) { +- oprot.writeFieldBegin(WHY_FIELD_DESC); +- oprot.writeString(struct.why); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class AuthenticationExceptionTupleSchemeFactory implements SchemeFactory { +- public AuthenticationExceptionTupleScheme getScheme() { +- return new AuthenticationExceptionTupleScheme(); +- } +- } +- +- private static class AuthenticationExceptionTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, AuthenticationException struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeString(struct.why); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, AuthenticationException struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.why = iprot.readString(); +- struct.setWhyIsSet(true); +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/AuthenticationRequest.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/AuthenticationRequest.java +deleted file mode 100644 +index 5778fa5..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/AuthenticationRequest.java ++++ /dev/null +@@ -1,465 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or 
more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. +- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * Authentication requests can contain any data, dependent on the IAuthenticator used +- */ +-public class AuthenticationRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final 
org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AuthenticationRequest"); +- +- private static final org.apache.thrift.protocol.TField CREDENTIALS_FIELD_DESC = new org.apache.thrift.protocol.TField("credentials", org.apache.thrift.protocol.TType.MAP, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new AuthenticationRequestStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new AuthenticationRequestTupleSchemeFactory()); +- } +- +- public Map credentials; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- CREDENTIALS((short)1, "credentials"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // CREDENTIALS +- return CREDENTIALS; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.CREDENTIALS, new org.apache.thrift.meta_data.FieldMetaData("credentials", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AuthenticationRequest.class, metaDataMap); +- } +- +- public AuthenticationRequest() { +- } +- +- public AuthenticationRequest( +- Map credentials) +- { +- this(); +- this.credentials = credentials; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public AuthenticationRequest(AuthenticationRequest other) { +- if (other.isSetCredentials()) { +- Map __this__credentials = new HashMap(other.credentials); +- this.credentials = __this__credentials; +- } +- } +- +- public AuthenticationRequest deepCopy() { +- return new AuthenticationRequest(this); +- } +- +- @Override +- public void clear() { +- this.credentials = null; +- } +- +- public int getCredentialsSize() { +- return (this.credentials == null) ? 
0 : this.credentials.size(); +- } +- +- public void putToCredentials(String key, String val) { +- if (this.credentials == null) { +- this.credentials = new HashMap(); +- } +- this.credentials.put(key, val); +- } +- +- public Map getCredentials() { +- return this.credentials; +- } +- +- public AuthenticationRequest setCredentials(Map credentials) { +- this.credentials = credentials; +- return this; +- } +- +- public void unsetCredentials() { +- this.credentials = null; +- } +- +- /** Returns true if field credentials is set (has been assigned a value) and false otherwise */ +- public boolean isSetCredentials() { +- return this.credentials != null; +- } +- +- public void setCredentialsIsSet(boolean value) { +- if (!value) { +- this.credentials = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case CREDENTIALS: +- if (value == null) { +- unsetCredentials(); +- } else { +- setCredentials((Map)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case CREDENTIALS: +- return getCredentials(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case CREDENTIALS: +- return isSetCredentials(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof AuthenticationRequest) +- return this.equals((AuthenticationRequest)that); +- return false; +- } +- +- public boolean equals(AuthenticationRequest that) { +- if (that == null) +- return false; +- +- boolean this_present_credentials = true && this.isSetCredentials(); +- boolean that_present_credentials = true && that.isSetCredentials(); +- if (this_present_credentials 
|| that_present_credentials) { +- if (!(this_present_credentials && that_present_credentials)) +- return false; +- if (!this.credentials.equals(that.credentials)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_credentials = true && (isSetCredentials()); +- builder.append(present_credentials); +- if (present_credentials) +- builder.append(credentials); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(AuthenticationRequest other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetCredentials()).compareTo(other.isSetCredentials()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetCredentials()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.credentials, other.credentials); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("AuthenticationRequest("); +- boolean first = true; +- +- sb.append("credentials:"); +- if (this.credentials == null) { +- sb.append("null"); +- } else { +- sb.append(this.credentials); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if 
(credentials == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'credentials' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class AuthenticationRequestStandardSchemeFactory implements SchemeFactory { +- public AuthenticationRequestStandardScheme getScheme() { +- return new AuthenticationRequestStandardScheme(); +- } +- } +- +- private static class AuthenticationRequestStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, AuthenticationRequest struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // CREDENTIALS +- if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { +- { +- org.apache.thrift.protocol.TMap _map80 = iprot.readMapBegin(); +- struct.credentials = new HashMap(2*_map80.size); +- for (int _i81 = 0; _i81 < _map80.size; ++_i81) +- { +- String _key82; +- String _val83; +- _key82 = iprot.readString(); +- _val83 = iprot.readString(); +- struct.credentials.put(_key82, _val83); +- } +- iprot.readMapEnd(); +- } +- 
struct.setCredentialsIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, AuthenticationRequest struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.credentials != null) { +- oprot.writeFieldBegin(CREDENTIALS_FIELD_DESC); +- { +- oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.credentials.size())); +- for (Map.Entry _iter84 : struct.credentials.entrySet()) +- { +- oprot.writeString(_iter84.getKey()); +- oprot.writeString(_iter84.getValue()); +- } +- oprot.writeMapEnd(); +- } +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class AuthenticationRequestTupleSchemeFactory implements SchemeFactory { +- public AuthenticationRequestTupleScheme getScheme() { +- return new AuthenticationRequestTupleScheme(); +- } +- } +- +- private static class AuthenticationRequestTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, AuthenticationRequest struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- { +- oprot.writeI32(struct.credentials.size()); +- for (Map.Entry _iter85 : struct.credentials.entrySet()) +- { +- oprot.writeString(_iter85.getKey()); +- oprot.writeString(_iter85.getValue()); +- } +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, AuthenticationRequest struct) throws org.apache.thrift.TException 
{ +- TTupleProtocol iprot = (TTupleProtocol) prot; +- { +- org.apache.thrift.protocol.TMap _map86 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); +- struct.credentials = new HashMap(2*_map86.size); +- for (int _i87 = 0; _i87 < _map86.size; ++_i87) +- { +- String _key88; +- String _val89; +- _key88 = iprot.readString(); +- _val89 = iprot.readString(); +- struct.credentials.put(_key88, _val89); +- } +- } +- struct.setCredentialsIsSet(true); +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/AuthorizationException.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/AuthorizationException.java +deleted file mode 100644 +index cd1bdf7..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/AuthorizationException.java ++++ /dev/null +@@ -1,413 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. 
+- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * invalid authorization request (user does not have access to keyspace) +- */ +-public class AuthorizationException extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AuthorizationException"); +- +- private static final org.apache.thrift.protocol.TField WHY_FIELD_DESC = new org.apache.thrift.protocol.TField("why", org.apache.thrift.protocol.TType.STRING, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new AuthorizationExceptionStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new AuthorizationExceptionTupleSchemeFactory()); +- } +- +- public String why; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- WHY((short)1, "why"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // WHY +- return WHY; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.WHY, new org.apache.thrift.meta_data.FieldMetaData("why", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AuthorizationException.class, metaDataMap); +- } +- +- public AuthorizationException() { +- } +- +- public AuthorizationException( +- String why) +- { +- this(); +- this.why = why; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public AuthorizationException(AuthorizationException other) { +- if (other.isSetWhy()) { +- this.why = other.why; +- } +- } +- +- public AuthorizationException deepCopy() { +- return new AuthorizationException(this); +- } +- +- @Override +- public void clear() { +- this.why = null; +- } +- +- public String getWhy() { +- return this.why; +- } +- +- public AuthorizationException setWhy(String why) { +- this.why = why; +- return this; +- } +- +- public void unsetWhy() { +- this.why = null; +- } +- +- /** Returns true if field why is set (has been assigned a value) and false otherwise */ +- public boolean isSetWhy() { +- return this.why != null; +- } +- +- public void setWhyIsSet(boolean value) { +- if (!value) { +- this.why = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case WHY: +- if (value == null) { +- unsetWhy(); +- } else { +- setWhy((String)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case WHY: +- return getWhy(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case WHY: +- return isSetWhy(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof AuthorizationException) +- return this.equals((AuthorizationException)that); +- return false; +- } +- +- public boolean 
equals(AuthorizationException that) { +- if (that == null) +- return false; +- +- boolean this_present_why = true && this.isSetWhy(); +- boolean that_present_why = true && that.isSetWhy(); +- if (this_present_why || that_present_why) { +- if (!(this_present_why && that_present_why)) +- return false; +- if (!this.why.equals(that.why)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_why = true && (isSetWhy()); +- builder.append(present_why); +- if (present_why) +- builder.append(why); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(AuthorizationException other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetWhy()).compareTo(other.isSetWhy()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetWhy()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.why, other.why); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("AuthorizationException("); +- boolean first = true; +- +- sb.append("why:"); +- if (this.why == null) { +- sb.append("null"); +- } else { +- sb.append(this.why); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws 
org.apache.thrift.TException { +- // check for required fields +- if (why == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'why' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class AuthorizationExceptionStandardSchemeFactory implements SchemeFactory { +- public AuthorizationExceptionStandardScheme getScheme() { +- return new AuthorizationExceptionStandardScheme(); +- } +- } +- +- private static class AuthorizationExceptionStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, AuthorizationException struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // WHY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.why = iprot.readString(); +- struct.setWhyIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); 
+- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, AuthorizationException struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.why != null) { +- oprot.writeFieldBegin(WHY_FIELD_DESC); +- oprot.writeString(struct.why); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class AuthorizationExceptionTupleSchemeFactory implements SchemeFactory { +- public AuthorizationExceptionTupleScheme getScheme() { +- return new AuthorizationExceptionTupleScheme(); +- } +- } +- +- private static class AuthorizationExceptionTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, AuthorizationException struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeString(struct.why); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, AuthorizationException struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.why = iprot.readString(); +- struct.setWhyIsSet(true); +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/CASResult.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/CASResult.java +deleted file mode 100644 +index 4d21bfe..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/CASResult.java ++++ /dev/null +@@ -1,574 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. 
See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. +- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-public class CASResult implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CASResult"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC 
= new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)1); +- private static final org.apache.thrift.protocol.TField CURRENT_VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("current_values", org.apache.thrift.protocol.TType.LIST, (short)2); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new CASResultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new CASResultTupleSchemeFactory()); +- } +- +- public boolean success; // required +- public List current_values; // optional +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)1, "success"), +- CURRENT_VALUES((short)2, "current_values"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // SUCCESS +- return SUCCESS; +- case 2: // CURRENT_VALUES +- return CURRENT_VALUES; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private static final int __SUCCESS_ISSET_ID = 0; +- private byte __isset_bitfield = 0; +- private _Fields optionals[] = {_Fields.CURRENT_VALUES}; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); +- tmpMap.put(_Fields.CURRENT_VALUES, new org.apache.thrift.meta_data.FieldMetaData("current_values", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Column.class)))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CASResult.class, metaDataMap); +- } +- +- public CASResult() { +- } +- +- public CASResult( +- boolean success) +- { +- this(); +- this.success = success; +- setSuccessIsSet(true); +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public CASResult(CASResult other) { +- __isset_bitfield = other.__isset_bitfield; +- this.success = other.success; +- if (other.isSetCurrent_values()) { +- List __this__current_values = new ArrayList(other.current_values.size()); +- for (Column other_element : other.current_values) { +- __this__current_values.add(new Column(other_element)); +- } +- this.current_values = __this__current_values; +- } +- } +- +- public CASResult deepCopy() { +- return new CASResult(this); +- } +- +- @Override +- public void clear() { +- setSuccessIsSet(false); +- this.success = false; +- this.current_values = null; +- } +- +- public boolean isSuccess() { +- return this.success; +- } +- +- public CASResult setSuccess(boolean success) { +- this.success = success; +- setSuccessIsSet(true); +- return this; +- } +- +- public void unsetSuccess() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID); +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return EncodingUtils.testBit(__isset_bitfield, __SUCCESS_ISSET_ID); +- } +- +- public void setSuccessIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value); +- } +- +- public int getCurrent_valuesSize() { +- return (this.current_values == null) ? 0 : this.current_values.size(); +- } +- +- public java.util.Iterator getCurrent_valuesIterator() { +- return (this.current_values == null) ? 
null : this.current_values.iterator(); +- } +- +- public void addToCurrent_values(Column elem) { +- if (this.current_values == null) { +- this.current_values = new ArrayList(); +- } +- this.current_values.add(elem); +- } +- +- public List getCurrent_values() { +- return this.current_values; +- } +- +- public CASResult setCurrent_values(List current_values) { +- this.current_values = current_values; +- return this; +- } +- +- public void unsetCurrent_values() { +- this.current_values = null; +- } +- +- /** Returns true if field current_values is set (has been assigned a value) and false otherwise */ +- public boolean isSetCurrent_values() { +- return this.current_values != null; +- } +- +- public void setCurrent_valuesIsSet(boolean value) { +- if (!value) { +- this.current_values = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((Boolean)value); +- } +- break; +- +- case CURRENT_VALUES: +- if (value == null) { +- unsetCurrent_values(); +- } else { +- setCurrent_values((List)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return Boolean.valueOf(isSuccess()); +- +- case CURRENT_VALUES: +- return getCurrent_values(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case CURRENT_VALUES: +- return isSetCurrent_values(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof CASResult) +- return this.equals((CASResult)that); +- return false; +- } +- +- public 
boolean equals(CASResult that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true; +- boolean that_present_success = true; +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (this.success != that.success) +- return false; +- } +- +- boolean this_present_current_values = true && this.isSetCurrent_values(); +- boolean that_present_current_values = true && that.isSetCurrent_values(); +- if (this_present_current_values || that_present_current_values) { +- if (!(this_present_current_values && that_present_current_values)) +- return false; +- if (!this.current_values.equals(that.current_values)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true; +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_current_values = true && (isSetCurrent_values()); +- builder.append(present_current_values); +- if (present_current_values) +- builder.append(current_values); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(CASResult other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetCurrent_values()).compareTo(other.isSetCurrent_values()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetCurrent_values()) { +- lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.current_values, other.current_values); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("CASResult("); +- boolean first = true; +- +- sb.append("success:"); +- sb.append(this.success); +- first = false; +- if (isSetCurrent_values()) { +- if (!first) sb.append(", "); +- sb.append("current_values:"); +- if (this.current_values == null) { +- sb.append("null"); +- } else { +- sb.append(this.current_values); +- } +- first = false; +- } +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // alas, we cannot check 'success' because it's a primitive and you chose the non-beans generator. +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+- __isset_bitfield = 0; +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class CASResultStandardSchemeFactory implements SchemeFactory { +- public CASResultStandardScheme getScheme() { +- return new CASResultStandardScheme(); +- } +- } +- +- private static class CASResultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, CASResult struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { +- struct.success = iprot.readBool(); +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // CURRENT_VALUES +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list48 = iprot.readListBegin(); +- struct.current_values = new ArrayList(_list48.size); +- for (int _i49 = 0; _i49 < _list48.size; ++_i49) +- { +- Column _elem50; +- _elem50 = new Column(); +- _elem50.read(iprot); +- struct.current_values.add(_elem50); +- } +- iprot.readListEnd(); +- } +- struct.setCurrent_valuesIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- if (!struct.isSetSuccess()) { +- throw new 
org.apache.thrift.protocol.TProtocolException("Required field 'success' was not found in serialized data! Struct: " + toString()); +- } +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, CASResult struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- oprot.writeBool(struct.success); +- oprot.writeFieldEnd(); +- if (struct.current_values != null) { +- if (struct.isSetCurrent_values()) { +- oprot.writeFieldBegin(CURRENT_VALUES_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.current_values.size())); +- for (Column _iter51 : struct.current_values) +- { +- _iter51.write(oprot); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class CASResultTupleSchemeFactory implements SchemeFactory { +- public CASResultTupleScheme getScheme() { +- return new CASResultTupleScheme(); +- } +- } +- +- private static class CASResultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, CASResult struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeBool(struct.success); +- BitSet optionals = new BitSet(); +- if (struct.isSetCurrent_values()) { +- optionals.set(0); +- } +- oprot.writeBitSet(optionals, 1); +- if (struct.isSetCurrent_values()) { +- { +- oprot.writeI32(struct.current_values.size()); +- for (Column _iter52 : struct.current_values) +- { +- _iter52.write(oprot); +- } +- } +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, CASResult struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.success = iprot.readBool(); +- struct.setSuccessIsSet(true); +- 
BitSet incoming = iprot.readBitSet(1); +- if (incoming.get(0)) { +- { +- org.apache.thrift.protocol.TList _list53 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); +- struct.current_values = new ArrayList(_list53.size); +- for (int _i54 = 0; _i54 < _list53.size; ++_i54) +- { +- Column _elem55; +- _elem55 = new Column(); +- _elem55.read(iprot); +- struct.current_values.add(_elem55); +- } +- } +- struct.setCurrent_valuesIsSet(true); +- } +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/Cassandra.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/Cassandra.java +deleted file mode 100644 +index cd4314b..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/Cassandra.java ++++ /dev/null +@@ -1,55794 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. 
+- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-public class Cassandra { +- +- public interface Iface { +- +- public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, org.apache.thrift.TException; +- +- public void set_keyspace(String keyspace) throws InvalidRequestException, org.apache.thrift.TException; +- +- /** +- * Get the Column or SuperColumn at the given column_path. If no value is present, NotFoundException is thrown. (This is +- * the only method that can throw an exception under non-failure conditions.) +- * +- * @param key +- * @param column_path +- * @param consistency_level +- */ +- public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level) throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException, org.apache.thrift.TException; +- +- /** +- * Get the group of columns contained by column_parent (either a ColumnFamily name or a ColumnFamily/SuperColumn name +- * pair) specified by the given SlicePredicate. 
If no matching values are found, an empty list is returned. +- * +- * @param key +- * @param column_parent +- * @param predicate +- * @param consistency_level +- */ +- public List get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException; +- +- /** +- * returns the number of columns matching predicate for a particular key, +- * ColumnFamily and optionally SuperColumn. +- * +- * @param key +- * @param column_parent +- * @param predicate +- * @param consistency_level +- */ +- public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException; +- +- /** +- * Performs a get_slice for column_parent and predicate for the given keys in parallel. +- * +- * @param keys +- * @param column_parent +- * @param predicate +- * @param consistency_level +- */ +- public Map> multiget_slice(List keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException; +- +- /** +- * Perform a get_count in parallel on the given list keys. The return value maps keys to the count found. +- * +- * @param keys +- * @param column_parent +- * @param predicate +- * @param consistency_level +- */ +- public Map multiget_count(List keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException; +- +- /** +- * returns a subset of columns for a contiguous range of keys. 
+- * +- * @param column_parent +- * @param predicate +- * @param range +- * @param consistency_level +- */ +- public List get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException; +- +- /** +- * returns a range of columns, wrapping to the next rows if necessary to collect max_results. +- * +- * @param column_family +- * @param range +- * @param start_column +- * @param consistency_level +- */ +- public List get_paged_slice(String column_family, KeyRange range, ByteBuffer start_column, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException; +- +- /** +- * Returns the subset of columns specified in SlicePredicate for the rows matching the IndexClause +- * @deprecated use get_range_slices instead with range.row_filter specified +- * +- * @param column_parent +- * @param index_clause +- * @param column_predicate +- * @param consistency_level +- */ +- public List get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException; +- +- /** +- * Insert a Column at the given column_parent.column_family and optional column_parent.super_column. +- * +- * @param key +- * @param column_parent +- * @param column +- * @param consistency_level +- */ +- public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException; +- +- /** +- * Increment or decrement a counter. 
+- * +- * @param key +- * @param column_parent +- * @param column +- * @param consistency_level +- */ +- public void add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException; +- +- /** +- * Atomic compare and set. +- * +- * If the cas is successfull, the success boolean in CASResult will be true and there will be no current_values. +- * Otherwise, success will be false and current_values will contain the current values for the columns in +- * expected (that, by definition of compare-and-set, will differ from the values in expected). +- * +- * A cas operation takes 2 consistency level. The first one, serial_consistency_level, simply indicates the +- * level of serialization required. This can be either ConsistencyLevel.SERIAL or ConsistencyLevel.LOCAL_SERIAL. +- * The second one, commit_consistency_level, defines the consistency level for the commit phase of the cas. This +- * is a more traditional consistency level (the same CL than for traditional writes are accepted) that impact +- * the visibility for reads of the operation. For instance, if commit_consistency_level is QUORUM, then it is +- * guaranteed that a followup QUORUM read will see the cas write (if that one was successful obviously). If +- * commit_consistency_level is ANY, you will need to use a SERIAL/LOCAL_SERIAL read to be guaranteed to see +- * the write. 
+- * +- * @param key +- * @param column_family +- * @param expected +- * @param updates +- * @param serial_consistency_level +- * @param commit_consistency_level +- */ +- public CASResult cas(ByteBuffer key, String column_family, List expected, List updates, ConsistencyLevel serial_consistency_level, ConsistencyLevel commit_consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException; +- +- /** +- * Remove data from the row specified by key at the granularity specified by column_path, and the given timestamp. Note +- * that all the values in column_path besides column_path.column_family are truly optional: you can remove the entire +- * row by just specifying the ColumnFamily, or you can remove a SuperColumn or a single Column by specifying those levels too. +- * +- * @param key +- * @param column_path +- * @param timestamp +- * @param consistency_level +- */ +- public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException; +- +- /** +- * Remove a counter at the specified location. +- * Note that counters have limited support for deletes: if you remove a counter, you must wait to issue any following update +- * until the delete has reached all the nodes and all of them have been fully compacted. +- * +- * @param key +- * @param path +- * @param consistency_level +- */ +- public void remove_counter(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException; +- +- /** +- * Mutate many columns or super columns for many row keys. See also: Mutation. +- * +- * mutation_map maps key to column family to a list of Mutation objects to take place at that scope. 
+- * * +- * +- * @param mutation_map +- * @param consistency_level +- */ +- public void batch_mutate(Map>> mutation_map, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException; +- +- /** +- * Atomically mutate many columns or super columns for many row keys. See also: Mutation. +- * +- * mutation_map maps key to column family to a list of Mutation objects to take place at that scope. +- * * +- * +- * @param mutation_map +- * @param consistency_level +- */ +- public void atomic_batch_mutate(Map>> mutation_map, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException; +- +- /** +- * Truncate will mark and entire column family as deleted. +- * From the user's perspective a successful call to truncate will result complete data deletion from cfname. +- * Internally, however, disk space will not be immediatily released, as with all deletes in cassandra, this one +- * only marks the data as deleted. +- * The operation succeeds only if all hosts in the cluster at available and will throw an UnavailableException if +- * some hosts are down. +- * +- * @param cfname +- */ +- public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException; +- +- /** +- * Select multiple slices of a key in a single RPC operation +- * +- * @param request +- */ +- public List get_multi_slice(MultiSliceRequest request) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException; +- +- /** +- * for each schema version present in the cluster, returns a list of nodes at that version. +- * hosts that do not respond will be under the key DatabaseDescriptor.INITIAL_VERSION. +- * the cluster is all on the same version if the size of the map is 1. 
+- */ +- public Map> describe_schema_versions() throws InvalidRequestException, org.apache.thrift.TException; +- +- /** +- * list the defined keyspaces in this cluster +- */ +- public List describe_keyspaces() throws InvalidRequestException, org.apache.thrift.TException; +- +- /** +- * get the cluster name +- */ +- public String describe_cluster_name() throws org.apache.thrift.TException; +- +- /** +- * get the thrift api version +- */ +- public String describe_version() throws org.apache.thrift.TException; +- +- /** +- * get the token ring: a map of ranges to host addresses, +- * represented as a set of TokenRange instead of a map from range +- * to list of endpoints, because you can't use Thrift structs as +- * map keys: +- * https://issues.apache.org/jira/browse/THRIFT-162 +- * +- * for the same reason, we can't return a set here, even though +- * order is neither important nor predictable. +- * +- * @param keyspace +- */ +- public List describe_ring(String keyspace) throws InvalidRequestException, org.apache.thrift.TException; +- +- /** +- * same as describe_ring, but considers only nodes in the local DC +- * +- * @param keyspace +- */ +- public List describe_local_ring(String keyspace) throws InvalidRequestException, org.apache.thrift.TException; +- +- /** +- * get the mapping between token->node ip +- * without taking replication into consideration +- * https://issues.apache.org/jira/browse/CASSANDRA-4092 +- */ +- public Map describe_token_map() throws InvalidRequestException, org.apache.thrift.TException; +- +- /** +- * returns the partitioner used by this cluster +- */ +- public String describe_partitioner() throws org.apache.thrift.TException; +- +- /** +- * returns the snitch used by this cluster +- */ +- public String describe_snitch() throws org.apache.thrift.TException; +- +- /** +- * describe specified keyspace +- * +- * @param keyspace +- */ +- public KsDef describe_keyspace(String keyspace) throws NotFoundException, InvalidRequestException, 
org.apache.thrift.TException; +- +- /** +- * experimental API for hadoop/parallel query support. +- * may change violently and without warning. +- * +- * returns list of token strings such that first subrange is (list[0], list[1]], +- * next is (list[1], list[2]], etc. +- * +- * @param cfName +- * @param start_token +- * @param end_token +- * @param keys_per_split +- */ +- public List describe_splits(String cfName, String start_token, String end_token, int keys_per_split) throws InvalidRequestException, org.apache.thrift.TException; +- +- /** +- * Enables tracing for the next query in this connection and returns the UUID for that trace session +- * The next query will be traced idependently of trace probability and the returned UUID can be used to query the trace keyspace +- */ +- public ByteBuffer trace_next_query() throws org.apache.thrift.TException; +- +- public List describe_splits_ex(String cfName, String start_token, String end_token, int keys_per_split) throws InvalidRequestException, org.apache.thrift.TException; +- +- /** +- * adds a column family. returns the new schema id. +- * +- * @param cf_def +- */ +- public String system_add_column_family(CfDef cf_def) throws InvalidRequestException, SchemaDisagreementException, org.apache.thrift.TException; +- +- /** +- * drops a column family. returns the new schema id. +- * +- * @param column_family +- */ +- public String system_drop_column_family(String column_family) throws InvalidRequestException, SchemaDisagreementException, org.apache.thrift.TException; +- +- /** +- * adds a keyspace and any column families that are part of it. returns the new schema id. +- * +- * @param ks_def +- */ +- public String system_add_keyspace(KsDef ks_def) throws InvalidRequestException, SchemaDisagreementException, org.apache.thrift.TException; +- +- /** +- * drops a keyspace and any column families that are part of it. returns the new schema id. 
+- * +- * @param keyspace +- */ +- public String system_drop_keyspace(String keyspace) throws InvalidRequestException, SchemaDisagreementException, org.apache.thrift.TException; +- +- /** +- * updates properties of a keyspace. returns the new schema id. +- * +- * @param ks_def +- */ +- public String system_update_keyspace(KsDef ks_def) throws InvalidRequestException, SchemaDisagreementException, org.apache.thrift.TException; +- +- /** +- * updates properties of a column family. returns the new schema id. +- * +- * @param cf_def +- */ +- public String system_update_column_family(CfDef cf_def) throws InvalidRequestException, SchemaDisagreementException, org.apache.thrift.TException; +- +- /** +- * @deprecated Throws InvalidRequestException since 2.2. Please use the CQL3 version instead. +- * +- * @param query +- * @param compression +- */ +- public CqlResult execute_cql_query(ByteBuffer query, Compression compression) throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, org.apache.thrift.TException; +- +- /** +- * Executes a CQL3 (Cassandra Query Language) statement and returns a +- * CqlResult containing the results. +- * +- * @param query +- * @param compression +- * @param consistency +- */ +- public CqlResult execute_cql3_query(ByteBuffer query, Compression compression, ConsistencyLevel consistency) throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, org.apache.thrift.TException; +- +- /** +- * @deprecated Throws InvalidRequestException since 2.2. Please use the CQL3 version instead. 
+- * +- * @param query +- * @param compression +- */ +- public CqlPreparedResult prepare_cql_query(ByteBuffer query, Compression compression) throws InvalidRequestException, org.apache.thrift.TException; +- +- /** +- * Prepare a CQL3 (Cassandra Query Language) statement by compiling and returning +- * - the type of CQL statement +- * - an id token of the compiled CQL stored on the server side. +- * - a count of the discovered bound markers in the statement +- * +- * @param query +- * @param compression +- */ +- public CqlPreparedResult prepare_cql3_query(ByteBuffer query, Compression compression) throws InvalidRequestException, org.apache.thrift.TException; +- +- /** +- * @deprecated Throws InvalidRequestException since 2.2. Please use the CQL3 version instead. +- * +- * @param itemId +- * @param values +- */ +- public CqlResult execute_prepared_cql_query(int itemId, List values) throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, org.apache.thrift.TException; +- +- /** +- * Executes a prepared CQL3 (Cassandra Query Language) statement by passing an id token, a list of variables +- * to bind, and the consistency level, and returns a CqlResult containing the results. +- * +- * @param itemId +- * @param values +- * @param consistency +- */ +- public CqlResult execute_prepared_cql3_query(int itemId, List values, ConsistencyLevel consistency) throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, org.apache.thrift.TException; +- +- /** +- * @deprecated This is now a no-op. Please use the CQL3 specific methods instead. 
+- * +- * @param version +- */ +- public void set_cql_version(String version) throws InvalidRequestException, org.apache.thrift.TException; +- +- } +- +- public interface AsyncIface { +- +- public void login(AuthenticationRequest auth_request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void set_keyspace(String keyspace, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void multiget_slice(List keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void multiget_count(List keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void get_paged_slice(String column_family, KeyRange range, ByteBuffer start_column, ConsistencyLevel consistency_level, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void cas(ByteBuffer key, String column_family, List expected, List updates, ConsistencyLevel serial_consistency_level, ConsistencyLevel commit_consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void remove_counter(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void batch_mutate(Map>> mutation_map, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void atomic_batch_mutate(Map>> mutation_map, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void truncate(String cfname, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void 
get_multi_slice(MultiSliceRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void describe_schema_versions(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void describe_keyspaces(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void describe_cluster_name(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void describe_version(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void describe_ring(String keyspace, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void describe_local_ring(String keyspace, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void describe_token_map(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void describe_partitioner(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void describe_snitch(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void describe_keyspace(String keyspace, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void describe_splits(String cfName, String start_token, String end_token, int keys_per_split, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void trace_next_query(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void describe_splits_ex(String cfName, String start_token, String end_token, int keys_per_split, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void system_add_column_family(CfDef cf_def, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void system_drop_column_family(String column_family, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void system_add_keyspace(KsDef ks_def, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void system_drop_keyspace(String keyspace, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void system_update_keyspace(KsDef ks_def, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void system_update_column_family(CfDef cf_def, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void execute_cql_query(ByteBuffer query, Compression compression, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void execute_cql3_query(ByteBuffer query, Compression compression, ConsistencyLevel consistency, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void prepare_cql_query(ByteBuffer query, Compression compression, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void prepare_cql3_query(ByteBuffer query, Compression compression, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void execute_prepared_cql_query(int itemId, List values, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void execute_prepared_cql3_query(int itemId, List values, 
ConsistencyLevel consistency, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- public void set_cql_version(String version, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; +- +- } +- +- public static class Client extends org.apache.thrift.TServiceClient implements Iface { +- public static class Factory implements org.apache.thrift.TServiceClientFactory { +- public Factory() {} +- public Client getClient(org.apache.thrift.protocol.TProtocol prot) { +- return new Client(prot); +- } +- public Client getClient(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) { +- return new Client(iprot, oprot); +- } +- } +- +- public Client(org.apache.thrift.protocol.TProtocol prot) +- { +- super(prot, prot); +- } +- +- public Client(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) { +- super(iprot, oprot); +- } +- +- public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, org.apache.thrift.TException +- { +- send_login(auth_request); +- recv_login(); +- } +- +- public void send_login(AuthenticationRequest auth_request) throws org.apache.thrift.TException +- { +- login_args args = new login_args(); +- args.setAuth_request(auth_request); +- sendBase("login", args); +- } +- +- public void recv_login() throws AuthenticationException, AuthorizationException, org.apache.thrift.TException +- { +- login_result result = new login_result(); +- receiveBase(result, "login"); +- if (result.authnx != null) { +- throw result.authnx; +- } +- if (result.authzx != null) { +- throw result.authzx; +- } +- return; +- } +- +- public void set_keyspace(String keyspace) throws InvalidRequestException, org.apache.thrift.TException +- { +- send_set_keyspace(keyspace); +- recv_set_keyspace(); +- } +- +- public void send_set_keyspace(String keyspace) throws 
org.apache.thrift.TException +- { +- set_keyspace_args args = new set_keyspace_args(); +- args.setKeyspace(keyspace); +- sendBase("set_keyspace", args); +- } +- +- public void recv_set_keyspace() throws InvalidRequestException, org.apache.thrift.TException +- { +- set_keyspace_result result = new set_keyspace_result(); +- receiveBase(result, "set_keyspace"); +- if (result.ire != null) { +- throw result.ire; +- } +- return; +- } +- +- public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level) throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- send_get(key, column_path, consistency_level); +- return recv_get(); +- } +- +- public void send_get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level) throws org.apache.thrift.TException +- { +- get_args args = new get_args(); +- args.setKey(key); +- args.setColumn_path(column_path); +- args.setConsistency_level(consistency_level); +- sendBase("get", args); +- } +- +- public ColumnOrSuperColumn recv_get() throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- get_result result = new get_result(); +- receiveBase(result, "get"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- if (result.nfe != null) { +- throw result.nfe; +- } +- if (result.ue != null) { +- throw result.ue; +- } +- if (result.te != null) { +- throw result.te; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get failed: unknown result"); +- } +- +- public List get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- send_get_slice(key, 
column_parent, predicate, consistency_level); +- return recv_get_slice(); +- } +- +- public void send_get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level) throws org.apache.thrift.TException +- { +- get_slice_args args = new get_slice_args(); +- args.setKey(key); +- args.setColumn_parent(column_parent); +- args.setPredicate(predicate); +- args.setConsistency_level(consistency_level); +- sendBase("get_slice", args); +- } +- +- public List recv_get_slice() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- get_slice_result result = new get_slice_result(); +- receiveBase(result, "get_slice"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- if (result.ue != null) { +- throw result.ue; +- } +- if (result.te != null) { +- throw result.te; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_slice failed: unknown result"); +- } +- +- public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- send_get_count(key, column_parent, predicate, consistency_level); +- return recv_get_count(); +- } +- +- public void send_get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level) throws org.apache.thrift.TException +- { +- get_count_args args = new get_count_args(); +- args.setKey(key); +- args.setColumn_parent(column_parent); +- args.setPredicate(predicate); +- args.setConsistency_level(consistency_level); +- sendBase("get_count", args); +- } +- +- public int recv_get_count() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- 
get_count_result result = new get_count_result(); +- receiveBase(result, "get_count"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- if (result.ue != null) { +- throw result.ue; +- } +- if (result.te != null) { +- throw result.te; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_count failed: unknown result"); +- } +- +- public Map> multiget_slice(List keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- send_multiget_slice(keys, column_parent, predicate, consistency_level); +- return recv_multiget_slice(); +- } +- +- public void send_multiget_slice(List keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level) throws org.apache.thrift.TException +- { +- multiget_slice_args args = new multiget_slice_args(); +- args.setKeys(keys); +- args.setColumn_parent(column_parent); +- args.setPredicate(predicate); +- args.setConsistency_level(consistency_level); +- sendBase("multiget_slice", args); +- } +- +- public Map> recv_multiget_slice() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- multiget_slice_result result = new multiget_slice_result(); +- receiveBase(result, "multiget_slice"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- if (result.ue != null) { +- throw result.ue; +- } +- if (result.te != null) { +- throw result.te; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "multiget_slice failed: unknown result"); +- } +- +- public Map multiget_count(List keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel 
consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- send_multiget_count(keys, column_parent, predicate, consistency_level); +- return recv_multiget_count(); +- } +- +- public void send_multiget_count(List keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level) throws org.apache.thrift.TException +- { +- multiget_count_args args = new multiget_count_args(); +- args.setKeys(keys); +- args.setColumn_parent(column_parent); +- args.setPredicate(predicate); +- args.setConsistency_level(consistency_level); +- sendBase("multiget_count", args); +- } +- +- public Map recv_multiget_count() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- multiget_count_result result = new multiget_count_result(); +- receiveBase(result, "multiget_count"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- if (result.ue != null) { +- throw result.ue; +- } +- if (result.te != null) { +- throw result.te; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "multiget_count failed: unknown result"); +- } +- +- public List get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- send_get_range_slices(column_parent, predicate, range, consistency_level); +- return recv_get_range_slices(); +- } +- +- public void send_get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level) throws org.apache.thrift.TException +- { +- get_range_slices_args args = new get_range_slices_args(); +- args.setColumn_parent(column_parent); +- args.setPredicate(predicate); +- 
args.setRange(range); +- args.setConsistency_level(consistency_level); +- sendBase("get_range_slices", args); +- } +- +- public List recv_get_range_slices() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- get_range_slices_result result = new get_range_slices_result(); +- receiveBase(result, "get_range_slices"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- if (result.ue != null) { +- throw result.ue; +- } +- if (result.te != null) { +- throw result.te; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_range_slices failed: unknown result"); +- } +- +- public List get_paged_slice(String column_family, KeyRange range, ByteBuffer start_column, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- send_get_paged_slice(column_family, range, start_column, consistency_level); +- return recv_get_paged_slice(); +- } +- +- public void send_get_paged_slice(String column_family, KeyRange range, ByteBuffer start_column, ConsistencyLevel consistency_level) throws org.apache.thrift.TException +- { +- get_paged_slice_args args = new get_paged_slice_args(); +- args.setColumn_family(column_family); +- args.setRange(range); +- args.setStart_column(start_column); +- args.setConsistency_level(consistency_level); +- sendBase("get_paged_slice", args); +- } +- +- public List recv_get_paged_slice() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- get_paged_slice_result result = new get_paged_slice_result(); +- receiveBase(result, "get_paged_slice"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- if (result.ue != null) { +- throw result.ue; +- } +- if (result.te != 
null) { +- throw result.te; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_paged_slice failed: unknown result"); +- } +- +- public List get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- send_get_indexed_slices(column_parent, index_clause, column_predicate, consistency_level); +- return recv_get_indexed_slices(); +- } +- +- public void send_get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws org.apache.thrift.TException +- { +- get_indexed_slices_args args = new get_indexed_slices_args(); +- args.setColumn_parent(column_parent); +- args.setIndex_clause(index_clause); +- args.setColumn_predicate(column_predicate); +- args.setConsistency_level(consistency_level); +- sendBase("get_indexed_slices", args); +- } +- +- public List recv_get_indexed_slices() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- get_indexed_slices_result result = new get_indexed_slices_result(); +- receiveBase(result, "get_indexed_slices"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- if (result.ue != null) { +- throw result.ue; +- } +- if (result.te != null) { +- throw result.te; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_indexed_slices failed: unknown result"); +- } +- +- public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- send_insert(key, column_parent, 
column, consistency_level); +- recv_insert(); +- } +- +- public void send_insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level) throws org.apache.thrift.TException +- { +- insert_args args = new insert_args(); +- args.setKey(key); +- args.setColumn_parent(column_parent); +- args.setColumn(column); +- args.setConsistency_level(consistency_level); +- sendBase("insert", args); +- } +- +- public void recv_insert() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- insert_result result = new insert_result(); +- receiveBase(result, "insert"); +- if (result.ire != null) { +- throw result.ire; +- } +- if (result.ue != null) { +- throw result.ue; +- } +- if (result.te != null) { +- throw result.te; +- } +- return; +- } +- +- public void add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- send_add(key, column_parent, column, consistency_level); +- recv_add(); +- } +- +- public void send_add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level) throws org.apache.thrift.TException +- { +- add_args args = new add_args(); +- args.setKey(key); +- args.setColumn_parent(column_parent); +- args.setColumn(column); +- args.setConsistency_level(consistency_level); +- sendBase("add", args); +- } +- +- public void recv_add() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- add_result result = new add_result(); +- receiveBase(result, "add"); +- if (result.ire != null) { +- throw result.ire; +- } +- if (result.ue != null) { +- throw result.ue; +- } +- if (result.te != null) { +- throw result.te; +- } +- return; +- } +- +- public CASResult cas(ByteBuffer key, String column_family, List expected, List updates, 
ConsistencyLevel serial_consistency_level, ConsistencyLevel commit_consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- send_cas(key, column_family, expected, updates, serial_consistency_level, commit_consistency_level); +- return recv_cas(); +- } +- +- public void send_cas(ByteBuffer key, String column_family, List expected, List updates, ConsistencyLevel serial_consistency_level, ConsistencyLevel commit_consistency_level) throws org.apache.thrift.TException +- { +- cas_args args = new cas_args(); +- args.setKey(key); +- args.setColumn_family(column_family); +- args.setExpected(expected); +- args.setUpdates(updates); +- args.setSerial_consistency_level(serial_consistency_level); +- args.setCommit_consistency_level(commit_consistency_level); +- sendBase("cas", args); +- } +- +- public CASResult recv_cas() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- cas_result result = new cas_result(); +- receiveBase(result, "cas"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- if (result.ue != null) { +- throw result.ue; +- } +- if (result.te != null) { +- throw result.te; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "cas failed: unknown result"); +- } +- +- public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- send_remove(key, column_path, timestamp, consistency_level); +- recv_remove(); +- } +- +- public void send_remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level) throws org.apache.thrift.TException +- { +- remove_args args = new remove_args(); +- args.setKey(key); +- 
args.setColumn_path(column_path); +- args.setTimestamp(timestamp); +- args.setConsistency_level(consistency_level); +- sendBase("remove", args); +- } +- +- public void recv_remove() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- remove_result result = new remove_result(); +- receiveBase(result, "remove"); +- if (result.ire != null) { +- throw result.ire; +- } +- if (result.ue != null) { +- throw result.ue; +- } +- if (result.te != null) { +- throw result.te; +- } +- return; +- } +- +- public void remove_counter(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- send_remove_counter(key, path, consistency_level); +- recv_remove_counter(); +- } +- +- public void send_remove_counter(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level) throws org.apache.thrift.TException +- { +- remove_counter_args args = new remove_counter_args(); +- args.setKey(key); +- args.setPath(path); +- args.setConsistency_level(consistency_level); +- sendBase("remove_counter", args); +- } +- +- public void recv_remove_counter() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- remove_counter_result result = new remove_counter_result(); +- receiveBase(result, "remove_counter"); +- if (result.ire != null) { +- throw result.ire; +- } +- if (result.ue != null) { +- throw result.ue; +- } +- if (result.te != null) { +- throw result.te; +- } +- return; +- } +- +- public void batch_mutate(Map>> mutation_map, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- send_batch_mutate(mutation_map, consistency_level); +- recv_batch_mutate(); +- } +- +- public void send_batch_mutate(Map>> mutation_map, ConsistencyLevel consistency_level) throws 
org.apache.thrift.TException +- { +- batch_mutate_args args = new batch_mutate_args(); +- args.setMutation_map(mutation_map); +- args.setConsistency_level(consistency_level); +- sendBase("batch_mutate", args); +- } +- +- public void recv_batch_mutate() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- batch_mutate_result result = new batch_mutate_result(); +- receiveBase(result, "batch_mutate"); +- if (result.ire != null) { +- throw result.ire; +- } +- if (result.ue != null) { +- throw result.ue; +- } +- if (result.te != null) { +- throw result.te; +- } +- return; +- } +- +- public void atomic_batch_mutate(Map>> mutation_map, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- send_atomic_batch_mutate(mutation_map, consistency_level); +- recv_atomic_batch_mutate(); +- } +- +- public void send_atomic_batch_mutate(Map>> mutation_map, ConsistencyLevel consistency_level) throws org.apache.thrift.TException +- { +- atomic_batch_mutate_args args = new atomic_batch_mutate_args(); +- args.setMutation_map(mutation_map); +- args.setConsistency_level(consistency_level); +- sendBase("atomic_batch_mutate", args); +- } +- +- public void recv_atomic_batch_mutate() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- atomic_batch_mutate_result result = new atomic_batch_mutate_result(); +- receiveBase(result, "atomic_batch_mutate"); +- if (result.ire != null) { +- throw result.ire; +- } +- if (result.ue != null) { +- throw result.ue; +- } +- if (result.te != null) { +- throw result.te; +- } +- return; +- } +- +- public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- send_truncate(cfname); +- recv_truncate(); +- } +- +- public void send_truncate(String cfname) throws 
org.apache.thrift.TException +- { +- truncate_args args = new truncate_args(); +- args.setCfname(cfname); +- sendBase("truncate", args); +- } +- +- public void recv_truncate() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- truncate_result result = new truncate_result(); +- receiveBase(result, "truncate"); +- if (result.ire != null) { +- throw result.ire; +- } +- if (result.ue != null) { +- throw result.ue; +- } +- if (result.te != null) { +- throw result.te; +- } +- return; +- } +- +- public List get_multi_slice(MultiSliceRequest request) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- send_get_multi_slice(request); +- return recv_get_multi_slice(); +- } +- +- public void send_get_multi_slice(MultiSliceRequest request) throws org.apache.thrift.TException +- { +- get_multi_slice_args args = new get_multi_slice_args(); +- args.setRequest(request); +- sendBase("get_multi_slice", args); +- } +- +- public List recv_get_multi_slice() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException +- { +- get_multi_slice_result result = new get_multi_slice_result(); +- receiveBase(result, "get_multi_slice"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- if (result.ue != null) { +- throw result.ue; +- } +- if (result.te != null) { +- throw result.te; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_multi_slice failed: unknown result"); +- } +- +- public Map> describe_schema_versions() throws InvalidRequestException, org.apache.thrift.TException +- { +- send_describe_schema_versions(); +- return recv_describe_schema_versions(); +- } +- +- public void send_describe_schema_versions() throws org.apache.thrift.TException +- { +- describe_schema_versions_args args = new 
describe_schema_versions_args(); +- sendBase("describe_schema_versions", args); +- } +- +- public Map> recv_describe_schema_versions() throws InvalidRequestException, org.apache.thrift.TException +- { +- describe_schema_versions_result result = new describe_schema_versions_result(); +- receiveBase(result, "describe_schema_versions"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "describe_schema_versions failed: unknown result"); +- } +- +- public List describe_keyspaces() throws InvalidRequestException, org.apache.thrift.TException +- { +- send_describe_keyspaces(); +- return recv_describe_keyspaces(); +- } +- +- public void send_describe_keyspaces() throws org.apache.thrift.TException +- { +- describe_keyspaces_args args = new describe_keyspaces_args(); +- sendBase("describe_keyspaces", args); +- } +- +- public List recv_describe_keyspaces() throws InvalidRequestException, org.apache.thrift.TException +- { +- describe_keyspaces_result result = new describe_keyspaces_result(); +- receiveBase(result, "describe_keyspaces"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "describe_keyspaces failed: unknown result"); +- } +- +- public String describe_cluster_name() throws org.apache.thrift.TException +- { +- send_describe_cluster_name(); +- return recv_describe_cluster_name(); +- } +- +- public void send_describe_cluster_name() throws org.apache.thrift.TException +- { +- describe_cluster_name_args args = new describe_cluster_name_args(); +- sendBase("describe_cluster_name", args); +- } +- +- public String recv_describe_cluster_name() throws org.apache.thrift.TException +- { +- describe_cluster_name_result result = 
new describe_cluster_name_result(); +- receiveBase(result, "describe_cluster_name"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "describe_cluster_name failed: unknown result"); +- } +- +- public String describe_version() throws org.apache.thrift.TException +- { +- send_describe_version(); +- return recv_describe_version(); +- } +- +- public void send_describe_version() throws org.apache.thrift.TException +- { +- describe_version_args args = new describe_version_args(); +- sendBase("describe_version", args); +- } +- +- public String recv_describe_version() throws org.apache.thrift.TException +- { +- describe_version_result result = new describe_version_result(); +- receiveBase(result, "describe_version"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "describe_version failed: unknown result"); +- } +- +- public List describe_ring(String keyspace) throws InvalidRequestException, org.apache.thrift.TException +- { +- send_describe_ring(keyspace); +- return recv_describe_ring(); +- } +- +- public void send_describe_ring(String keyspace) throws org.apache.thrift.TException +- { +- describe_ring_args args = new describe_ring_args(); +- args.setKeyspace(keyspace); +- sendBase("describe_ring", args); +- } +- +- public List recv_describe_ring() throws InvalidRequestException, org.apache.thrift.TException +- { +- describe_ring_result result = new describe_ring_result(); +- receiveBase(result, "describe_ring"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "describe_ring failed: unknown result"); +- } +- +- public List describe_local_ring(String 
keyspace) throws InvalidRequestException, org.apache.thrift.TException +- { +- send_describe_local_ring(keyspace); +- return recv_describe_local_ring(); +- } +- +- public void send_describe_local_ring(String keyspace) throws org.apache.thrift.TException +- { +- describe_local_ring_args args = new describe_local_ring_args(); +- args.setKeyspace(keyspace); +- sendBase("describe_local_ring", args); +- } +- +- public List recv_describe_local_ring() throws InvalidRequestException, org.apache.thrift.TException +- { +- describe_local_ring_result result = new describe_local_ring_result(); +- receiveBase(result, "describe_local_ring"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "describe_local_ring failed: unknown result"); +- } +- +- public Map describe_token_map() throws InvalidRequestException, org.apache.thrift.TException +- { +- send_describe_token_map(); +- return recv_describe_token_map(); +- } +- +- public void send_describe_token_map() throws org.apache.thrift.TException +- { +- describe_token_map_args args = new describe_token_map_args(); +- sendBase("describe_token_map", args); +- } +- +- public Map recv_describe_token_map() throws InvalidRequestException, org.apache.thrift.TException +- { +- describe_token_map_result result = new describe_token_map_result(); +- receiveBase(result, "describe_token_map"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "describe_token_map failed: unknown result"); +- } +- +- public String describe_partitioner() throws org.apache.thrift.TException +- { +- send_describe_partitioner(); +- return recv_describe_partitioner(); +- } +- +- public void send_describe_partitioner() 
throws org.apache.thrift.TException +- { +- describe_partitioner_args args = new describe_partitioner_args(); +- sendBase("describe_partitioner", args); +- } +- +- public String recv_describe_partitioner() throws org.apache.thrift.TException +- { +- describe_partitioner_result result = new describe_partitioner_result(); +- receiveBase(result, "describe_partitioner"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "describe_partitioner failed: unknown result"); +- } +- +- public String describe_snitch() throws org.apache.thrift.TException +- { +- send_describe_snitch(); +- return recv_describe_snitch(); +- } +- +- public void send_describe_snitch() throws org.apache.thrift.TException +- { +- describe_snitch_args args = new describe_snitch_args(); +- sendBase("describe_snitch", args); +- } +- +- public String recv_describe_snitch() throws org.apache.thrift.TException +- { +- describe_snitch_result result = new describe_snitch_result(); +- receiveBase(result, "describe_snitch"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "describe_snitch failed: unknown result"); +- } +- +- public KsDef describe_keyspace(String keyspace) throws NotFoundException, InvalidRequestException, org.apache.thrift.TException +- { +- send_describe_keyspace(keyspace); +- return recv_describe_keyspace(); +- } +- +- public void send_describe_keyspace(String keyspace) throws org.apache.thrift.TException +- { +- describe_keyspace_args args = new describe_keyspace_args(); +- args.setKeyspace(keyspace); +- sendBase("describe_keyspace", args); +- } +- +- public KsDef recv_describe_keyspace() throws NotFoundException, InvalidRequestException, org.apache.thrift.TException +- { +- describe_keyspace_result result = new describe_keyspace_result(); +- 
receiveBase(result, "describe_keyspace"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.nfe != null) { +- throw result.nfe; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "describe_keyspace failed: unknown result"); +- } +- +- public List describe_splits(String cfName, String start_token, String end_token, int keys_per_split) throws InvalidRequestException, org.apache.thrift.TException +- { +- send_describe_splits(cfName, start_token, end_token, keys_per_split); +- return recv_describe_splits(); +- } +- +- public void send_describe_splits(String cfName, String start_token, String end_token, int keys_per_split) throws org.apache.thrift.TException +- { +- describe_splits_args args = new describe_splits_args(); +- args.setCfName(cfName); +- args.setStart_token(start_token); +- args.setEnd_token(end_token); +- args.setKeys_per_split(keys_per_split); +- sendBase("describe_splits", args); +- } +- +- public List recv_describe_splits() throws InvalidRequestException, org.apache.thrift.TException +- { +- describe_splits_result result = new describe_splits_result(); +- receiveBase(result, "describe_splits"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "describe_splits failed: unknown result"); +- } +- +- public ByteBuffer trace_next_query() throws org.apache.thrift.TException +- { +- send_trace_next_query(); +- return recv_trace_next_query(); +- } +- +- public void send_trace_next_query() throws org.apache.thrift.TException +- { +- trace_next_query_args args = new trace_next_query_args(); +- sendBase("trace_next_query", args); +- } +- +- public ByteBuffer recv_trace_next_query() throws org.apache.thrift.TException +- { +- trace_next_query_result 
result = new trace_next_query_result(); +- receiveBase(result, "trace_next_query"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "trace_next_query failed: unknown result"); +- } +- +- public List describe_splits_ex(String cfName, String start_token, String end_token, int keys_per_split) throws InvalidRequestException, org.apache.thrift.TException +- { +- send_describe_splits_ex(cfName, start_token, end_token, keys_per_split); +- return recv_describe_splits_ex(); +- } +- +- public void send_describe_splits_ex(String cfName, String start_token, String end_token, int keys_per_split) throws org.apache.thrift.TException +- { +- describe_splits_ex_args args = new describe_splits_ex_args(); +- args.setCfName(cfName); +- args.setStart_token(start_token); +- args.setEnd_token(end_token); +- args.setKeys_per_split(keys_per_split); +- sendBase("describe_splits_ex", args); +- } +- +- public List recv_describe_splits_ex() throws InvalidRequestException, org.apache.thrift.TException +- { +- describe_splits_ex_result result = new describe_splits_ex_result(); +- receiveBase(result, "describe_splits_ex"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "describe_splits_ex failed: unknown result"); +- } +- +- public String system_add_column_family(CfDef cf_def) throws InvalidRequestException, SchemaDisagreementException, org.apache.thrift.TException +- { +- send_system_add_column_family(cf_def); +- return recv_system_add_column_family(); +- } +- +- public void send_system_add_column_family(CfDef cf_def) throws org.apache.thrift.TException +- { +- system_add_column_family_args args = new system_add_column_family_args(); +- args.setCf_def(cf_def); +- 
sendBase("system_add_column_family", args); +- } +- +- public String recv_system_add_column_family() throws InvalidRequestException, SchemaDisagreementException, org.apache.thrift.TException +- { +- system_add_column_family_result result = new system_add_column_family_result(); +- receiveBase(result, "system_add_column_family"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- if (result.sde != null) { +- throw result.sde; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "system_add_column_family failed: unknown result"); +- } +- +- public String system_drop_column_family(String column_family) throws InvalidRequestException, SchemaDisagreementException, org.apache.thrift.TException +- { +- send_system_drop_column_family(column_family); +- return recv_system_drop_column_family(); +- } +- +- public void send_system_drop_column_family(String column_family) throws org.apache.thrift.TException +- { +- system_drop_column_family_args args = new system_drop_column_family_args(); +- args.setColumn_family(column_family); +- sendBase("system_drop_column_family", args); +- } +- +- public String recv_system_drop_column_family() throws InvalidRequestException, SchemaDisagreementException, org.apache.thrift.TException +- { +- system_drop_column_family_result result = new system_drop_column_family_result(); +- receiveBase(result, "system_drop_column_family"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- if (result.sde != null) { +- throw result.sde; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "system_drop_column_family failed: unknown result"); +- } +- +- public String system_add_keyspace(KsDef ks_def) throws InvalidRequestException, SchemaDisagreementException, org.apache.thrift.TException +- { +- 
send_system_add_keyspace(ks_def); +- return recv_system_add_keyspace(); +- } +- +- public void send_system_add_keyspace(KsDef ks_def) throws org.apache.thrift.TException +- { +- system_add_keyspace_args args = new system_add_keyspace_args(); +- args.setKs_def(ks_def); +- sendBase("system_add_keyspace", args); +- } +- +- public String recv_system_add_keyspace() throws InvalidRequestException, SchemaDisagreementException, org.apache.thrift.TException +- { +- system_add_keyspace_result result = new system_add_keyspace_result(); +- receiveBase(result, "system_add_keyspace"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- if (result.sde != null) { +- throw result.sde; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "system_add_keyspace failed: unknown result"); +- } +- +- public String system_drop_keyspace(String keyspace) throws InvalidRequestException, SchemaDisagreementException, org.apache.thrift.TException +- { +- send_system_drop_keyspace(keyspace); +- return recv_system_drop_keyspace(); +- } +- +- public void send_system_drop_keyspace(String keyspace) throws org.apache.thrift.TException +- { +- system_drop_keyspace_args args = new system_drop_keyspace_args(); +- args.setKeyspace(keyspace); +- sendBase("system_drop_keyspace", args); +- } +- +- public String recv_system_drop_keyspace() throws InvalidRequestException, SchemaDisagreementException, org.apache.thrift.TException +- { +- system_drop_keyspace_result result = new system_drop_keyspace_result(); +- receiveBase(result, "system_drop_keyspace"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- if (result.sde != null) { +- throw result.sde; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "system_drop_keyspace failed: unknown result"); +- } 
+- +- public String system_update_keyspace(KsDef ks_def) throws InvalidRequestException, SchemaDisagreementException, org.apache.thrift.TException +- { +- send_system_update_keyspace(ks_def); +- return recv_system_update_keyspace(); +- } +- +- public void send_system_update_keyspace(KsDef ks_def) throws org.apache.thrift.TException +- { +- system_update_keyspace_args args = new system_update_keyspace_args(); +- args.setKs_def(ks_def); +- sendBase("system_update_keyspace", args); +- } +- +- public String recv_system_update_keyspace() throws InvalidRequestException, SchemaDisagreementException, org.apache.thrift.TException +- { +- system_update_keyspace_result result = new system_update_keyspace_result(); +- receiveBase(result, "system_update_keyspace"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- if (result.sde != null) { +- throw result.sde; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "system_update_keyspace failed: unknown result"); +- } +- +- public String system_update_column_family(CfDef cf_def) throws InvalidRequestException, SchemaDisagreementException, org.apache.thrift.TException +- { +- send_system_update_column_family(cf_def); +- return recv_system_update_column_family(); +- } +- +- public void send_system_update_column_family(CfDef cf_def) throws org.apache.thrift.TException +- { +- system_update_column_family_args args = new system_update_column_family_args(); +- args.setCf_def(cf_def); +- sendBase("system_update_column_family", args); +- } +- +- public String recv_system_update_column_family() throws InvalidRequestException, SchemaDisagreementException, org.apache.thrift.TException +- { +- system_update_column_family_result result = new system_update_column_family_result(); +- receiveBase(result, "system_update_column_family"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire 
!= null) { +- throw result.ire; +- } +- if (result.sde != null) { +- throw result.sde; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "system_update_column_family failed: unknown result"); +- } +- +- public CqlResult execute_cql_query(ByteBuffer query, Compression compression) throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, org.apache.thrift.TException +- { +- send_execute_cql_query(query, compression); +- return recv_execute_cql_query(); +- } +- +- public void send_execute_cql_query(ByteBuffer query, Compression compression) throws org.apache.thrift.TException +- { +- execute_cql_query_args args = new execute_cql_query_args(); +- args.setQuery(query); +- args.setCompression(compression); +- sendBase("execute_cql_query", args); +- } +- +- public CqlResult recv_execute_cql_query() throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, org.apache.thrift.TException +- { +- execute_cql_query_result result = new execute_cql_query_result(); +- receiveBase(result, "execute_cql_query"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- if (result.ue != null) { +- throw result.ue; +- } +- if (result.te != null) { +- throw result.te; +- } +- if (result.sde != null) { +- throw result.sde; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "execute_cql_query failed: unknown result"); +- } +- +- public CqlResult execute_cql3_query(ByteBuffer query, Compression compression, ConsistencyLevel consistency) throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, org.apache.thrift.TException +- { +- send_execute_cql3_query(query, compression, consistency); +- return recv_execute_cql3_query(); +- } +- +- public void 
send_execute_cql3_query(ByteBuffer query, Compression compression, ConsistencyLevel consistency) throws org.apache.thrift.TException +- { +- execute_cql3_query_args args = new execute_cql3_query_args(); +- args.setQuery(query); +- args.setCompression(compression); +- args.setConsistency(consistency); +- sendBase("execute_cql3_query", args); +- } +- +- public CqlResult recv_execute_cql3_query() throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, org.apache.thrift.TException +- { +- execute_cql3_query_result result = new execute_cql3_query_result(); +- receiveBase(result, "execute_cql3_query"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- if (result.ue != null) { +- throw result.ue; +- } +- if (result.te != null) { +- throw result.te; +- } +- if (result.sde != null) { +- throw result.sde; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "execute_cql3_query failed: unknown result"); +- } +- +- public CqlPreparedResult prepare_cql_query(ByteBuffer query, Compression compression) throws InvalidRequestException, org.apache.thrift.TException +- { +- send_prepare_cql_query(query, compression); +- return recv_prepare_cql_query(); +- } +- +- public void send_prepare_cql_query(ByteBuffer query, Compression compression) throws org.apache.thrift.TException +- { +- prepare_cql_query_args args = new prepare_cql_query_args(); +- args.setQuery(query); +- args.setCompression(compression); +- sendBase("prepare_cql_query", args); +- } +- +- public CqlPreparedResult recv_prepare_cql_query() throws InvalidRequestException, org.apache.thrift.TException +- { +- prepare_cql_query_result result = new prepare_cql_query_result(); +- receiveBase(result, "prepare_cql_query"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- throw new 
org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "prepare_cql_query failed: unknown result"); +- } +- +- public CqlPreparedResult prepare_cql3_query(ByteBuffer query, Compression compression) throws InvalidRequestException, org.apache.thrift.TException +- { +- send_prepare_cql3_query(query, compression); +- return recv_prepare_cql3_query(); +- } +- +- public void send_prepare_cql3_query(ByteBuffer query, Compression compression) throws org.apache.thrift.TException +- { +- prepare_cql3_query_args args = new prepare_cql3_query_args(); +- args.setQuery(query); +- args.setCompression(compression); +- sendBase("prepare_cql3_query", args); +- } +- +- public CqlPreparedResult recv_prepare_cql3_query() throws InvalidRequestException, org.apache.thrift.TException +- { +- prepare_cql3_query_result result = new prepare_cql3_query_result(); +- receiveBase(result, "prepare_cql3_query"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "prepare_cql3_query failed: unknown result"); +- } +- +- public CqlResult execute_prepared_cql_query(int itemId, List values) throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, org.apache.thrift.TException +- { +- send_execute_prepared_cql_query(itemId, values); +- return recv_execute_prepared_cql_query(); +- } +- +- public void send_execute_prepared_cql_query(int itemId, List values) throws org.apache.thrift.TException +- { +- execute_prepared_cql_query_args args = new execute_prepared_cql_query_args(); +- args.setItemId(itemId); +- args.setValues(values); +- sendBase("execute_prepared_cql_query", args); +- } +- +- public CqlResult recv_execute_prepared_cql_query() throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, 
org.apache.thrift.TException +- { +- execute_prepared_cql_query_result result = new execute_prepared_cql_query_result(); +- receiveBase(result, "execute_prepared_cql_query"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- if (result.ue != null) { +- throw result.ue; +- } +- if (result.te != null) { +- throw result.te; +- } +- if (result.sde != null) { +- throw result.sde; +- } +- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "execute_prepared_cql_query failed: unknown result"); +- } +- +- public CqlResult execute_prepared_cql3_query(int itemId, List values, ConsistencyLevel consistency) throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, org.apache.thrift.TException +- { +- send_execute_prepared_cql3_query(itemId, values, consistency); +- return recv_execute_prepared_cql3_query(); +- } +- +- public void send_execute_prepared_cql3_query(int itemId, List values, ConsistencyLevel consistency) throws org.apache.thrift.TException +- { +- execute_prepared_cql3_query_args args = new execute_prepared_cql3_query_args(); +- args.setItemId(itemId); +- args.setValues(values); +- args.setConsistency(consistency); +- sendBase("execute_prepared_cql3_query", args); +- } +- +- public CqlResult recv_execute_prepared_cql3_query() throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, org.apache.thrift.TException +- { +- execute_prepared_cql3_query_result result = new execute_prepared_cql3_query_result(); +- receiveBase(result, "execute_prepared_cql3_query"); +- if (result.isSetSuccess()) { +- return result.success; +- } +- if (result.ire != null) { +- throw result.ire; +- } +- if (result.ue != null) { +- throw result.ue; +- } +- if (result.te != null) { +- throw result.te; +- } +- if (result.sde != null) { +- throw result.sde; +- } +- throw new 
org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "execute_prepared_cql3_query failed: unknown result"); +- } +- +- public void set_cql_version(String version) throws InvalidRequestException, org.apache.thrift.TException +- { +- send_set_cql_version(version); +- recv_set_cql_version(); +- } +- +- public void send_set_cql_version(String version) throws org.apache.thrift.TException +- { +- set_cql_version_args args = new set_cql_version_args(); +- args.setVersion(version); +- sendBase("set_cql_version", args); +- } +- +- public void recv_set_cql_version() throws InvalidRequestException, org.apache.thrift.TException +- { +- set_cql_version_result result = new set_cql_version_result(); +- receiveBase(result, "set_cql_version"); +- if (result.ire != null) { +- throw result.ire; +- } +- return; +- } +- +- } +- public static class AsyncClient extends org.apache.thrift.async.TAsyncClient implements AsyncIface { +- public static class Factory implements org.apache.thrift.async.TAsyncClientFactory { +- private org.apache.thrift.async.TAsyncClientManager clientManager; +- private org.apache.thrift.protocol.TProtocolFactory protocolFactory; +- public Factory(org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.protocol.TProtocolFactory protocolFactory) { +- this.clientManager = clientManager; +- this.protocolFactory = protocolFactory; +- } +- public AsyncClient getAsyncClient(org.apache.thrift.transport.TNonblockingTransport transport) { +- return new AsyncClient(protocolFactory, clientManager, transport); +- } +- } +- +- public AsyncClient(org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.transport.TNonblockingTransport transport) { +- super(protocolFactory, clientManager, transport); +- } +- +- public void login(AuthenticationRequest auth_request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException { +- checkReady(); +- login_call method_call = new login_call(auth_request, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class login_call extends org.apache.thrift.async.TAsyncMethodCall { +- private AuthenticationRequest auth_request; +- public login_call(AuthenticationRequest auth_request, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.auth_request = auth_request; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("login", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- login_args args = new login_args(); +- args.setAuth_request(auth_request); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public void getResult() throws AuthenticationException, AuthorizationException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- (new Client(prot)).recv_login(); +- } +- } +- +- public void set_keyspace(String keyspace, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- set_keyspace_call method_call = new set_keyspace_call(keyspace, resultHandler, this, 
___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class set_keyspace_call extends org.apache.thrift.async.TAsyncMethodCall { +- private String keyspace; +- public set_keyspace_call(String keyspace, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.keyspace = keyspace; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("set_keyspace", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- set_keyspace_args args = new set_keyspace_args(); +- args.setKeyspace(keyspace); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public void getResult() throws InvalidRequestException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- (new Client(prot)).recv_set_keyspace(); +- } +- } +- +- public void get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- get_call method_call = new get_call(key, column_path, consistency_level, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- 
___manager.call(method_call); +- } +- +- public static class get_call extends org.apache.thrift.async.TAsyncMethodCall { +- private ByteBuffer key; +- private ColumnPath column_path; +- private ConsistencyLevel consistency_level; +- public get_call(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.key = key; +- this.column_path = column_path; +- this.consistency_level = consistency_level; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- get_args args = new get_args(); +- args.setKey(key); +- args.setColumn_path(column_path); +- args.setConsistency_level(consistency_level); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public ColumnOrSuperColumn getResult() throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_get(); +- } +- } +- +- public void get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- get_slice_call method_call = new get_slice_call(key, column_parent, predicate, consistency_level, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class get_slice_call extends org.apache.thrift.async.TAsyncMethodCall { +- private ByteBuffer key; +- private ColumnParent column_parent; +- private SlicePredicate predicate; +- private ConsistencyLevel consistency_level; +- public get_slice_call(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.key = key; +- this.column_parent = column_parent; +- this.predicate = predicate; +- this.consistency_level = consistency_level; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_slice", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- get_slice_args args = new get_slice_args(); +- args.setKey(key); +- args.setColumn_parent(column_parent); +- args.setPredicate(predicate); +- args.setConsistency_level(consistency_level); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public List getResult() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- 
org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_get_slice(); +- } +- } +- +- public void get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- get_count_call method_call = new get_count_call(key, column_parent, predicate, consistency_level, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class get_count_call extends org.apache.thrift.async.TAsyncMethodCall { +- private ByteBuffer key; +- private ColumnParent column_parent; +- private SlicePredicate predicate; +- private ConsistencyLevel consistency_level; +- public get_count_call(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.key = key; +- this.column_parent = column_parent; +- this.predicate = predicate; +- this.consistency_level = consistency_level; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_count", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- get_count_args args = new get_count_args(); +- args.setKey(key); +- args.setColumn_parent(column_parent); +- 
args.setPredicate(predicate); +- args.setConsistency_level(consistency_level); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public int getResult() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_get_count(); +- } +- } +- +- public void multiget_slice(List keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- multiget_slice_call method_call = new multiget_slice_call(keys, column_parent, predicate, consistency_level, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class multiget_slice_call extends org.apache.thrift.async.TAsyncMethodCall { +- private List keys; +- private ColumnParent column_parent; +- private SlicePredicate predicate; +- private ConsistencyLevel consistency_level; +- public multiget_slice_call(List keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.keys = keys; +- this.column_parent = column_parent; +- 
this.predicate = predicate; +- this.consistency_level = consistency_level; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("multiget_slice", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- multiget_slice_args args = new multiget_slice_args(); +- args.setKeys(keys); +- args.setColumn_parent(column_parent); +- args.setPredicate(predicate); +- args.setConsistency_level(consistency_level); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public Map> getResult() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_multiget_slice(); +- } +- } +- +- public void multiget_count(List keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- multiget_count_call method_call = new multiget_count_call(keys, column_parent, predicate, consistency_level, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class multiget_count_call extends org.apache.thrift.async.TAsyncMethodCall { +- private List keys; +- private ColumnParent column_parent; +- private SlicePredicate predicate; +- private ConsistencyLevel consistency_level; +- public multiget_count_call(List keys, ColumnParent column_parent, 
SlicePredicate predicate, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.keys = keys; +- this.column_parent = column_parent; +- this.predicate = predicate; +- this.consistency_level = consistency_level; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("multiget_count", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- multiget_count_args args = new multiget_count_args(); +- args.setKeys(keys); +- args.setColumn_parent(column_parent); +- args.setPredicate(predicate); +- args.setConsistency_level(consistency_level); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public Map getResult() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_multiget_count(); +- } +- } +- +- public void get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- get_range_slices_call method_call = new get_range_slices_call(column_parent, predicate, range, 
consistency_level, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class get_range_slices_call extends org.apache.thrift.async.TAsyncMethodCall { +- private ColumnParent column_parent; +- private SlicePredicate predicate; +- private KeyRange range; +- private ConsistencyLevel consistency_level; +- public get_range_slices_call(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.column_parent = column_parent; +- this.predicate = predicate; +- this.range = range; +- this.consistency_level = consistency_level; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_range_slices", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- get_range_slices_args args = new get_range_slices_args(); +- args.setColumn_parent(column_parent); +- args.setPredicate(predicate); +- args.setRange(range); +- args.setConsistency_level(consistency_level); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public List getResult() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- 
org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_get_range_slices(); +- } +- } +- +- public void get_paged_slice(String column_family, KeyRange range, ByteBuffer start_column, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- get_paged_slice_call method_call = new get_paged_slice_call(column_family, range, start_column, consistency_level, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class get_paged_slice_call extends org.apache.thrift.async.TAsyncMethodCall { +- private String column_family; +- private KeyRange range; +- private ByteBuffer start_column; +- private ConsistencyLevel consistency_level; +- public get_paged_slice_call(String column_family, KeyRange range, ByteBuffer start_column, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.column_family = column_family; +- this.range = range; +- this.start_column = start_column; +- this.consistency_level = consistency_level; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_paged_slice", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- get_paged_slice_args args = new get_paged_slice_args(); +- args.setColumn_family(column_family); +- args.setRange(range); +- args.setStart_column(start_column); +- args.setConsistency_level(consistency_level); +- args.write(prot); 
+- prot.writeMessageEnd(); +- } +- +- public List getResult() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_get_paged_slice(); +- } +- } +- +- public void get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- get_indexed_slices_call method_call = new get_indexed_slices_call(column_parent, index_clause, column_predicate, consistency_level, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class get_indexed_slices_call extends org.apache.thrift.async.TAsyncMethodCall { +- private ColumnParent column_parent; +- private IndexClause index_clause; +- private SlicePredicate column_predicate; +- private ConsistencyLevel consistency_level; +- public get_indexed_slices_call(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.column_parent = column_parent; +- 
this.index_clause = index_clause; +- this.column_predicate = column_predicate; +- this.consistency_level = consistency_level; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_indexed_slices", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- get_indexed_slices_args args = new get_indexed_slices_args(); +- args.setColumn_parent(column_parent); +- args.setIndex_clause(index_clause); +- args.setColumn_predicate(column_predicate); +- args.setConsistency_level(consistency_level); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public List getResult() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_get_indexed_slices(); +- } +- } +- +- public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- insert_call method_call = new insert_call(key, column_parent, column, consistency_level, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class insert_call extends org.apache.thrift.async.TAsyncMethodCall { +- private ByteBuffer key; +- private ColumnParent column_parent; +- private Column column; +- private ConsistencyLevel consistency_level; +- public 
insert_call(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.key = key; +- this.column_parent = column_parent; +- this.column = column; +- this.consistency_level = consistency_level; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("insert", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- insert_args args = new insert_args(); +- args.setKey(key); +- args.setColumn_parent(column_parent); +- args.setColumn(column); +- args.setConsistency_level(consistency_level); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public void getResult() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- (new Client(prot)).recv_insert(); +- } +- } +- +- public void add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- add_call method_call = new add_call(key, column_parent, column, consistency_level, resultHandler, this, 
___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class add_call extends org.apache.thrift.async.TAsyncMethodCall { +- private ByteBuffer key; +- private ColumnParent column_parent; +- private CounterColumn column; +- private ConsistencyLevel consistency_level; +- public add_call(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.key = key; +- this.column_parent = column_parent; +- this.column = column; +- this.consistency_level = consistency_level; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("add", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- add_args args = new add_args(); +- args.setKey(key); +- args.setColumn_parent(column_parent); +- args.setColumn(column); +- args.setConsistency_level(consistency_level); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public void getResult() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- (new Client(prot)).recv_add(); +- } +- } +- +- public 
void cas(ByteBuffer key, String column_family, List expected, List updates, ConsistencyLevel serial_consistency_level, ConsistencyLevel commit_consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- cas_call method_call = new cas_call(key, column_family, expected, updates, serial_consistency_level, commit_consistency_level, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class cas_call extends org.apache.thrift.async.TAsyncMethodCall { +- private ByteBuffer key; +- private String column_family; +- private List expected; +- private List updates; +- private ConsistencyLevel serial_consistency_level; +- private ConsistencyLevel commit_consistency_level; +- public cas_call(ByteBuffer key, String column_family, List expected, List updates, ConsistencyLevel serial_consistency_level, ConsistencyLevel commit_consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.key = key; +- this.column_family = column_family; +- this.expected = expected; +- this.updates = updates; +- this.serial_consistency_level = serial_consistency_level; +- this.commit_consistency_level = commit_consistency_level; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("cas", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- cas_args args = new cas_args(); +- args.setKey(key); +- args.setColumn_family(column_family); +- args.setExpected(expected); +- args.setUpdates(updates); +- 
args.setSerial_consistency_level(serial_consistency_level); +- args.setCommit_consistency_level(commit_consistency_level); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public CASResult getResult() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_cas(); +- } +- } +- +- public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- remove_call method_call = new remove_call(key, column_path, timestamp, consistency_level, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class remove_call extends org.apache.thrift.async.TAsyncMethodCall { +- private ByteBuffer key; +- private ColumnPath column_path; +- private long timestamp; +- private ConsistencyLevel consistency_level; +- public remove_call(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.key = key; +- this.column_path = column_path; +- this.timestamp = timestamp; +- 
this.consistency_level = consistency_level; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("remove", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- remove_args args = new remove_args(); +- args.setKey(key); +- args.setColumn_path(column_path); +- args.setTimestamp(timestamp); +- args.setConsistency_level(consistency_level); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public void getResult() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- (new Client(prot)).recv_remove(); +- } +- } +- +- public void remove_counter(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- remove_counter_call method_call = new remove_counter_call(key, path, consistency_level, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class remove_counter_call extends org.apache.thrift.async.TAsyncMethodCall { +- private ByteBuffer key; +- private ColumnPath path; +- private ConsistencyLevel consistency_level; +- public remove_counter_call(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory 
protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.key = key; +- this.path = path; +- this.consistency_level = consistency_level; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("remove_counter", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- remove_counter_args args = new remove_counter_args(); +- args.setKey(key); +- args.setPath(path); +- args.setConsistency_level(consistency_level); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public void getResult() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- (new Client(prot)).recv_remove_counter(); +- } +- } +- +- public void batch_mutate(Map>> mutation_map, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- batch_mutate_call method_call = new batch_mutate_call(mutation_map, consistency_level, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class batch_mutate_call extends org.apache.thrift.async.TAsyncMethodCall { +- private Map>> mutation_map; +- private ConsistencyLevel consistency_level; +- public batch_mutate_call(Map>> mutation_map, ConsistencyLevel 
consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.mutation_map = mutation_map; +- this.consistency_level = consistency_level; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("batch_mutate", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- batch_mutate_args args = new batch_mutate_args(); +- args.setMutation_map(mutation_map); +- args.setConsistency_level(consistency_level); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public void getResult() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- (new Client(prot)).recv_batch_mutate(); +- } +- } +- +- public void atomic_batch_mutate(Map>> mutation_map, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- atomic_batch_mutate_call method_call = new atomic_batch_mutate_call(mutation_map, consistency_level, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class atomic_batch_mutate_call extends 
org.apache.thrift.async.TAsyncMethodCall { +- private Map>> mutation_map; +- private ConsistencyLevel consistency_level; +- public atomic_batch_mutate_call(Map>> mutation_map, ConsistencyLevel consistency_level, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.mutation_map = mutation_map; +- this.consistency_level = consistency_level; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("atomic_batch_mutate", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- atomic_batch_mutate_args args = new atomic_batch_mutate_args(); +- args.setMutation_map(mutation_map); +- args.setConsistency_level(consistency_level); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public void getResult() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- (new Client(prot)).recv_atomic_batch_mutate(); +- } +- } +- +- public void truncate(String cfname, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- truncate_call method_call = new truncate_call(cfname, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = 
method_call; +- ___manager.call(method_call); +- } +- +- public static class truncate_call extends org.apache.thrift.async.TAsyncMethodCall { +- private String cfname; +- public truncate_call(String cfname, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.cfname = cfname; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("truncate", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- truncate_args args = new truncate_args(); +- args.setCfname(cfname); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public void getResult() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- (new Client(prot)).recv_truncate(); +- } +- } +- +- public void get_multi_slice(MultiSliceRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- get_multi_slice_call method_call = new get_multi_slice_call(request, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class get_multi_slice_call extends 
org.apache.thrift.async.TAsyncMethodCall { +- private MultiSliceRequest request; +- public get_multi_slice_call(MultiSliceRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.request = request; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_multi_slice", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- get_multi_slice_args args = new get_multi_slice_args(); +- args.setRequest(request); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public List getResult() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_get_multi_slice(); +- } +- } +- +- public void describe_schema_versions(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- describe_schema_versions_call method_call = new describe_schema_versions_call(resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class describe_schema_versions_call extends org.apache.thrift.async.TAsyncMethodCall { +- public 
describe_schema_versions_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("describe_schema_versions", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- describe_schema_versions_args args = new describe_schema_versions_args(); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public Map> getResult() throws InvalidRequestException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_describe_schema_versions(); +- } +- } +- +- public void describe_keyspaces(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- describe_keyspaces_call method_call = new describe_keyspaces_call(resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class describe_keyspaces_call extends org.apache.thrift.async.TAsyncMethodCall { +- public describe_keyspaces_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, 
org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("describe_keyspaces", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- describe_keyspaces_args args = new describe_keyspaces_args(); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public List getResult() throws InvalidRequestException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_describe_keyspaces(); +- } +- } +- +- public void describe_cluster_name(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- describe_cluster_name_call method_call = new describe_cluster_name_call(resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class describe_cluster_name_call extends org.apache.thrift.async.TAsyncMethodCall { +- public describe_cluster_name_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- } +- +- public void 
write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("describe_cluster_name", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- describe_cluster_name_args args = new describe_cluster_name_args(); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public String getResult() throws org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_describe_cluster_name(); +- } +- } +- +- public void describe_version(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- describe_version_call method_call = new describe_version_call(resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class describe_version_call extends org.apache.thrift.async.TAsyncMethodCall { +- public describe_version_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("describe_version", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- describe_version_args 
args = new describe_version_args(); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public String getResult() throws org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_describe_version(); +- } +- } +- +- public void describe_ring(String keyspace, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- describe_ring_call method_call = new describe_ring_call(keyspace, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class describe_ring_call extends org.apache.thrift.async.TAsyncMethodCall { +- private String keyspace; +- public describe_ring_call(String keyspace, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.keyspace = keyspace; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("describe_ring", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- describe_ring_args args = new describe_ring_args(); +- args.setKeyspace(keyspace); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public List getResult() throws InvalidRequestException, 
org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_describe_ring(); +- } +- } +- +- public void describe_local_ring(String keyspace, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- describe_local_ring_call method_call = new describe_local_ring_call(keyspace, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class describe_local_ring_call extends org.apache.thrift.async.TAsyncMethodCall { +- private String keyspace; +- public describe_local_ring_call(String keyspace, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.keyspace = keyspace; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("describe_local_ring", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- describe_local_ring_args args = new describe_local_ring_args(); +- args.setKeyspace(keyspace); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public List getResult() throws InvalidRequestException, org.apache.thrift.TException { +- if (getState() != 
org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_describe_local_ring(); +- } +- } +- +- public void describe_token_map(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- describe_token_map_call method_call = new describe_token_map_call(resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class describe_token_map_call extends org.apache.thrift.async.TAsyncMethodCall { +- public describe_token_map_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("describe_token_map", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- describe_token_map_args args = new describe_token_map_args(); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public Map getResult() throws InvalidRequestException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new 
org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_describe_token_map(); +- } +- } +- +- public void describe_partitioner(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- describe_partitioner_call method_call = new describe_partitioner_call(resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class describe_partitioner_call extends org.apache.thrift.async.TAsyncMethodCall { +- public describe_partitioner_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("describe_partitioner", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- describe_partitioner_args args = new describe_partitioner_args(); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public String getResult() throws org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_describe_partitioner(); +- 
} +- } +- +- public void describe_snitch(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- describe_snitch_call method_call = new describe_snitch_call(resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class describe_snitch_call extends org.apache.thrift.async.TAsyncMethodCall { +- public describe_snitch_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("describe_snitch", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- describe_snitch_args args = new describe_snitch_args(); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public String getResult() throws org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_describe_snitch(); +- } +- } +- +- public void describe_keyspace(String keyspace, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- describe_keyspace_call method_call = new describe_keyspace_call(keyspace, resultHandler, this, 
___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class describe_keyspace_call extends org.apache.thrift.async.TAsyncMethodCall { +- private String keyspace; +- public describe_keyspace_call(String keyspace, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.keyspace = keyspace; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("describe_keyspace", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- describe_keyspace_args args = new describe_keyspace_args(); +- args.setKeyspace(keyspace); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public KsDef getResult() throws NotFoundException, InvalidRequestException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_describe_keyspace(); +- } +- } +- +- public void describe_splits(String cfName, String start_token, String end_token, int keys_per_split, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- describe_splits_call method_call = new describe_splits_call(cfName, start_token, end_token, keys_per_split, resultHandler, this, 
___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class describe_splits_call extends org.apache.thrift.async.TAsyncMethodCall { +- private String cfName; +- private String start_token; +- private String end_token; +- private int keys_per_split; +- public describe_splits_call(String cfName, String start_token, String end_token, int keys_per_split, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.cfName = cfName; +- this.start_token = start_token; +- this.end_token = end_token; +- this.keys_per_split = keys_per_split; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("describe_splits", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- describe_splits_args args = new describe_splits_args(); +- args.setCfName(cfName); +- args.setStart_token(start_token); +- args.setEnd_token(end_token); +- args.setKeys_per_split(keys_per_split); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public List getResult() throws InvalidRequestException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_describe_splits(); +- } +- } +- +- public void 
trace_next_query(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- trace_next_query_call method_call = new trace_next_query_call(resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class trace_next_query_call extends org.apache.thrift.async.TAsyncMethodCall { +- public trace_next_query_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("trace_next_query", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- trace_next_query_args args = new trace_next_query_args(); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public ByteBuffer getResult() throws org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_trace_next_query(); +- } +- } +- +- public void describe_splits_ex(String cfName, String start_token, String end_token, int keys_per_split, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- describe_splits_ex_call method_call = new 
describe_splits_ex_call(cfName, start_token, end_token, keys_per_split, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class describe_splits_ex_call extends org.apache.thrift.async.TAsyncMethodCall { +- private String cfName; +- private String start_token; +- private String end_token; +- private int keys_per_split; +- public describe_splits_ex_call(String cfName, String start_token, String end_token, int keys_per_split, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.cfName = cfName; +- this.start_token = start_token; +- this.end_token = end_token; +- this.keys_per_split = keys_per_split; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("describe_splits_ex", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- describe_splits_ex_args args = new describe_splits_ex_args(); +- args.setCfName(cfName); +- args.setStart_token(start_token); +- args.setEnd_token(end_token); +- args.setKeys_per_split(keys_per_split); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public List getResult() throws InvalidRequestException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = 
client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_describe_splits_ex(); +- } +- } +- +- public void system_add_column_family(CfDef cf_def, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- system_add_column_family_call method_call = new system_add_column_family_call(cf_def, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class system_add_column_family_call extends org.apache.thrift.async.TAsyncMethodCall { +- private CfDef cf_def; +- public system_add_column_family_call(CfDef cf_def, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.cf_def = cf_def; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("system_add_column_family", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- system_add_column_family_args args = new system_add_column_family_args(); +- args.setCf_def(cf_def); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public String getResult() throws InvalidRequestException, SchemaDisagreementException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = 
client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_system_add_column_family(); +- } +- } +- +- public void system_drop_column_family(String column_family, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- system_drop_column_family_call method_call = new system_drop_column_family_call(column_family, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class system_drop_column_family_call extends org.apache.thrift.async.TAsyncMethodCall { +- private String column_family; +- public system_drop_column_family_call(String column_family, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.column_family = column_family; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("system_drop_column_family", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- system_drop_column_family_args args = new system_drop_column_family_args(); +- args.setColumn_family(column_family); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public String getResult() throws InvalidRequestException, SchemaDisagreementException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- 
org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_system_drop_column_family(); +- } +- } +- +- public void system_add_keyspace(KsDef ks_def, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- system_add_keyspace_call method_call = new system_add_keyspace_call(ks_def, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class system_add_keyspace_call extends org.apache.thrift.async.TAsyncMethodCall { +- private KsDef ks_def; +- public system_add_keyspace_call(KsDef ks_def, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.ks_def = ks_def; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("system_add_keyspace", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- system_add_keyspace_args args = new system_add_keyspace_args(); +- args.setKs_def(ks_def); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public String getResult() throws InvalidRequestException, SchemaDisagreementException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = 
client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_system_add_keyspace(); +- } +- } +- +- public void system_drop_keyspace(String keyspace, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- system_drop_keyspace_call method_call = new system_drop_keyspace_call(keyspace, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class system_drop_keyspace_call extends org.apache.thrift.async.TAsyncMethodCall { +- private String keyspace; +- public system_drop_keyspace_call(String keyspace, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.keyspace = keyspace; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("system_drop_keyspace", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- system_drop_keyspace_args args = new system_drop_keyspace_args(); +- args.setKeyspace(keyspace); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public String getResult() throws InvalidRequestException, SchemaDisagreementException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = 
client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_system_drop_keyspace(); +- } +- } +- +- public void system_update_keyspace(KsDef ks_def, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- system_update_keyspace_call method_call = new system_update_keyspace_call(ks_def, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class system_update_keyspace_call extends org.apache.thrift.async.TAsyncMethodCall { +- private KsDef ks_def; +- public system_update_keyspace_call(KsDef ks_def, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.ks_def = ks_def; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("system_update_keyspace", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- system_update_keyspace_args args = new system_update_keyspace_args(); +- args.setKs_def(ks_def); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public String getResult() throws InvalidRequestException, SchemaDisagreementException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); 
+- return (new Client(prot)).recv_system_update_keyspace(); +- } +- } +- +- public void system_update_column_family(CfDef cf_def, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- system_update_column_family_call method_call = new system_update_column_family_call(cf_def, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class system_update_column_family_call extends org.apache.thrift.async.TAsyncMethodCall { +- private CfDef cf_def; +- public system_update_column_family_call(CfDef cf_def, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.cf_def = cf_def; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("system_update_column_family", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- system_update_column_family_args args = new system_update_column_family_args(); +- args.setCf_def(cf_def); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public String getResult() throws InvalidRequestException, SchemaDisagreementException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new 
Client(prot)).recv_system_update_column_family(); +- } +- } +- +- public void execute_cql_query(ByteBuffer query, Compression compression, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- execute_cql_query_call method_call = new execute_cql_query_call(query, compression, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class execute_cql_query_call extends org.apache.thrift.async.TAsyncMethodCall { +- private ByteBuffer query; +- private Compression compression; +- public execute_cql_query_call(ByteBuffer query, Compression compression, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.query = query; +- this.compression = compression; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("execute_cql_query", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- execute_cql_query_args args = new execute_cql_query_args(); +- args.setQuery(query); +- args.setCompression(compression); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public CqlResult getResult() throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new 
org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_execute_cql_query(); +- } +- } +- +- public void execute_cql3_query(ByteBuffer query, Compression compression, ConsistencyLevel consistency, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- execute_cql3_query_call method_call = new execute_cql3_query_call(query, compression, consistency, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class execute_cql3_query_call extends org.apache.thrift.async.TAsyncMethodCall { +- private ByteBuffer query; +- private Compression compression; +- private ConsistencyLevel consistency; +- public execute_cql3_query_call(ByteBuffer query, Compression compression, ConsistencyLevel consistency, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.query = query; +- this.compression = compression; +- this.consistency = consistency; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("execute_cql3_query", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- execute_cql3_query_args args = new execute_cql3_query_args(); +- args.setQuery(query); +- args.setCompression(compression); +- args.setConsistency(consistency); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public CqlResult getResult() throws InvalidRequestException, 
UnavailableException, TimedOutException, SchemaDisagreementException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_execute_cql3_query(); +- } +- } +- +- public void prepare_cql_query(ByteBuffer query, Compression compression, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- prepare_cql_query_call method_call = new prepare_cql_query_call(query, compression, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class prepare_cql_query_call extends org.apache.thrift.async.TAsyncMethodCall { +- private ByteBuffer query; +- private Compression compression; +- public prepare_cql_query_call(ByteBuffer query, Compression compression, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.query = query; +- this.compression = compression; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("prepare_cql_query", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- prepare_cql_query_args args = new prepare_cql_query_args(); +- args.setQuery(query); +- 
args.setCompression(compression); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public CqlPreparedResult getResult() throws InvalidRequestException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_prepare_cql_query(); +- } +- } +- +- public void prepare_cql3_query(ByteBuffer query, Compression compression, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- prepare_cql3_query_call method_call = new prepare_cql3_query_call(query, compression, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class prepare_cql3_query_call extends org.apache.thrift.async.TAsyncMethodCall { +- private ByteBuffer query; +- private Compression compression; +- public prepare_cql3_query_call(ByteBuffer query, Compression compression, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.query = query; +- this.compression = compression; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("prepare_cql3_query", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- 
prepare_cql3_query_args args = new prepare_cql3_query_args(); +- args.setQuery(query); +- args.setCompression(compression); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public CqlPreparedResult getResult() throws InvalidRequestException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_prepare_cql3_query(); +- } +- } +- +- public void execute_prepared_cql_query(int itemId, List values, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- execute_prepared_cql_query_call method_call = new execute_prepared_cql_query_call(itemId, values, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class execute_prepared_cql_query_call extends org.apache.thrift.async.TAsyncMethodCall { +- private int itemId; +- private List values; +- public execute_prepared_cql_query_call(int itemId, List values, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.itemId = itemId; +- this.values = values; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("execute_prepared_cql_query", 
org.apache.thrift.protocol.TMessageType.CALL, 0)); +- execute_prepared_cql_query_args args = new execute_prepared_cql_query_args(); +- args.setItemId(itemId); +- args.setValues(values); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public CqlResult getResult() throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_execute_prepared_cql_query(); +- } +- } +- +- public void execute_prepared_cql3_query(int itemId, List values, ConsistencyLevel consistency, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- execute_prepared_cql3_query_call method_call = new execute_prepared_cql3_query_call(itemId, values, consistency, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class execute_prepared_cql3_query_call extends org.apache.thrift.async.TAsyncMethodCall { +- private int itemId; +- private List values; +- private ConsistencyLevel consistency; +- public execute_prepared_cql3_query_call(int itemId, List values, ConsistencyLevel consistency, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- 
this.itemId = itemId; +- this.values = values; +- this.consistency = consistency; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("execute_prepared_cql3_query", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- execute_prepared_cql3_query_args args = new execute_prepared_cql3_query_args(); +- args.setItemId(itemId); +- args.setValues(values); +- args.setConsistency(consistency); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public CqlResult getResult() throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- return (new Client(prot)).recv_execute_prepared_cql3_query(); +- } +- } +- +- public void set_cql_version(String version, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { +- checkReady(); +- set_cql_version_call method_call = new set_cql_version_call(version, resultHandler, this, ___protocolFactory, ___transport); +- this.___currentMethod = method_call; +- ___manager.call(method_call); +- } +- +- public static class set_cql_version_call extends org.apache.thrift.async.TAsyncMethodCall { +- private String version; +- public set_cql_version_call(String version, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) 
throws org.apache.thrift.TException { +- super(client, protocolFactory, transport, resultHandler, false); +- this.version = version; +- } +- +- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { +- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("set_cql_version", org.apache.thrift.protocol.TMessageType.CALL, 0)); +- set_cql_version_args args = new set_cql_version_args(); +- args.setVersion(version); +- args.write(prot); +- prot.writeMessageEnd(); +- } +- +- public void getResult() throws InvalidRequestException, org.apache.thrift.TException { +- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { +- throw new IllegalStateException("Method call not finished!"); +- } +- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); +- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); +- (new Client(prot)).recv_set_cql_version(); +- } +- } +- +- } +- +- public static class Processor extends org.apache.thrift.TBaseProcessor implements org.apache.thrift.TProcessor { +- private static final Logger LOGGER = LoggerFactory.getLogger(Processor.class.getName()); +- public Processor(I iface) { +- super(iface, getProcessMap(new HashMap>())); +- } +- +- protected Processor(I iface, Map> processMap) { +- super(iface, getProcessMap(processMap)); +- } +- +- private static Map> getProcessMap(Map> processMap) { +- processMap.put("login", new login()); +- processMap.put("set_keyspace", new set_keyspace()); +- processMap.put("get", new get()); +- processMap.put("get_slice", new get_slice()); +- processMap.put("get_count", new get_count()); +- processMap.put("multiget_slice", new multiget_slice()); +- processMap.put("multiget_count", new multiget_count()); +- processMap.put("get_range_slices", new get_range_slices()); +- processMap.put("get_paged_slice", 
new get_paged_slice()); +- processMap.put("get_indexed_slices", new get_indexed_slices()); +- processMap.put("insert", new insert()); +- processMap.put("add", new add()); +- processMap.put("cas", new cas()); +- processMap.put("remove", new remove()); +- processMap.put("remove_counter", new remove_counter()); +- processMap.put("batch_mutate", new batch_mutate()); +- processMap.put("atomic_batch_mutate", new atomic_batch_mutate()); +- processMap.put("truncate", new truncate()); +- processMap.put("get_multi_slice", new get_multi_slice()); +- processMap.put("describe_schema_versions", new describe_schema_versions()); +- processMap.put("describe_keyspaces", new describe_keyspaces()); +- processMap.put("describe_cluster_name", new describe_cluster_name()); +- processMap.put("describe_version", new describe_version()); +- processMap.put("describe_ring", new describe_ring()); +- processMap.put("describe_local_ring", new describe_local_ring()); +- processMap.put("describe_token_map", new describe_token_map()); +- processMap.put("describe_partitioner", new describe_partitioner()); +- processMap.put("describe_snitch", new describe_snitch()); +- processMap.put("describe_keyspace", new describe_keyspace()); +- processMap.put("describe_splits", new describe_splits()); +- processMap.put("trace_next_query", new trace_next_query()); +- processMap.put("describe_splits_ex", new describe_splits_ex()); +- processMap.put("system_add_column_family", new system_add_column_family()); +- processMap.put("system_drop_column_family", new system_drop_column_family()); +- processMap.put("system_add_keyspace", new system_add_keyspace()); +- processMap.put("system_drop_keyspace", new system_drop_keyspace()); +- processMap.put("system_update_keyspace", new system_update_keyspace()); +- processMap.put("system_update_column_family", new system_update_column_family()); +- processMap.put("execute_cql_query", new execute_cql_query()); +- processMap.put("execute_cql3_query", new execute_cql3_query()); +- 
processMap.put("prepare_cql_query", new prepare_cql_query()); +- processMap.put("prepare_cql3_query", new prepare_cql3_query()); +- processMap.put("execute_prepared_cql_query", new execute_prepared_cql_query()); +- processMap.put("execute_prepared_cql3_query", new execute_prepared_cql3_query()); +- processMap.put("set_cql_version", new set_cql_version()); +- return processMap; +- } +- +- public static class login extends org.apache.thrift.ProcessFunction { +- public login() { +- super("login"); +- } +- +- public login_args getEmptyArgsInstance() { +- return new login_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public login_result getResult(I iface, login_args args) throws org.apache.thrift.TException { +- login_result result = new login_result(); +- try { +- iface.login(args.auth_request); +- } catch (AuthenticationException authnx) { +- result.authnx = authnx; +- } catch (AuthorizationException authzx) { +- result.authzx = authzx; +- } +- return result; +- } +- } +- +- public static class set_keyspace extends org.apache.thrift.ProcessFunction { +- public set_keyspace() { +- super("set_keyspace"); +- } +- +- public set_keyspace_args getEmptyArgsInstance() { +- return new set_keyspace_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public set_keyspace_result getResult(I iface, set_keyspace_args args) throws org.apache.thrift.TException { +- set_keyspace_result result = new set_keyspace_result(); +- try { +- iface.set_keyspace(args.keyspace); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } +- return result; +- } +- } +- +- public static class get extends org.apache.thrift.ProcessFunction { +- public get() { +- super("get"); +- } +- +- public get_args getEmptyArgsInstance() { +- return new get_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public get_result getResult(I iface, get_args args) throws org.apache.thrift.TException { +- get_result result = new 
get_result(); +- try { +- result.success = iface.get(args.key, args.column_path, args.consistency_level); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } catch (NotFoundException nfe) { +- result.nfe = nfe; +- } catch (UnavailableException ue) { +- result.ue = ue; +- } catch (TimedOutException te) { +- result.te = te; +- } +- return result; +- } +- } +- +- public static class get_slice extends org.apache.thrift.ProcessFunction { +- public get_slice() { +- super("get_slice"); +- } +- +- public get_slice_args getEmptyArgsInstance() { +- return new get_slice_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public get_slice_result getResult(I iface, get_slice_args args) throws org.apache.thrift.TException { +- get_slice_result result = new get_slice_result(); +- try { +- result.success = iface.get_slice(args.key, args.column_parent, args.predicate, args.consistency_level); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } catch (UnavailableException ue) { +- result.ue = ue; +- } catch (TimedOutException te) { +- result.te = te; +- } +- return result; +- } +- } +- +- public static class get_count extends org.apache.thrift.ProcessFunction { +- public get_count() { +- super("get_count"); +- } +- +- public get_count_args getEmptyArgsInstance() { +- return new get_count_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public get_count_result getResult(I iface, get_count_args args) throws org.apache.thrift.TException { +- get_count_result result = new get_count_result(); +- try { +- result.success = iface.get_count(args.key, args.column_parent, args.predicate, args.consistency_level); +- result.setSuccessIsSet(true); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } catch (UnavailableException ue) { +- result.ue = ue; +- } catch (TimedOutException te) { +- result.te = te; +- } +- return result; +- } +- } +- +- public static class multiget_slice extends 
org.apache.thrift.ProcessFunction { +- public multiget_slice() { +- super("multiget_slice"); +- } +- +- public multiget_slice_args getEmptyArgsInstance() { +- return new multiget_slice_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public multiget_slice_result getResult(I iface, multiget_slice_args args) throws org.apache.thrift.TException { +- multiget_slice_result result = new multiget_slice_result(); +- try { +- result.success = iface.multiget_slice(args.keys, args.column_parent, args.predicate, args.consistency_level); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } catch (UnavailableException ue) { +- result.ue = ue; +- } catch (TimedOutException te) { +- result.te = te; +- } +- return result; +- } +- } +- +- public static class multiget_count extends org.apache.thrift.ProcessFunction { +- public multiget_count() { +- super("multiget_count"); +- } +- +- public multiget_count_args getEmptyArgsInstance() { +- return new multiget_count_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public multiget_count_result getResult(I iface, multiget_count_args args) throws org.apache.thrift.TException { +- multiget_count_result result = new multiget_count_result(); +- try { +- result.success = iface.multiget_count(args.keys, args.column_parent, args.predicate, args.consistency_level); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } catch (UnavailableException ue) { +- result.ue = ue; +- } catch (TimedOutException te) { +- result.te = te; +- } +- return result; +- } +- } +- +- public static class get_range_slices extends org.apache.thrift.ProcessFunction { +- public get_range_slices() { +- super("get_range_slices"); +- } +- +- public get_range_slices_args getEmptyArgsInstance() { +- return new get_range_slices_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public get_range_slices_result getResult(I iface, get_range_slices_args args) throws 
org.apache.thrift.TException { +- get_range_slices_result result = new get_range_slices_result(); +- try { +- result.success = iface.get_range_slices(args.column_parent, args.predicate, args.range, args.consistency_level); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } catch (UnavailableException ue) { +- result.ue = ue; +- } catch (TimedOutException te) { +- result.te = te; +- } +- return result; +- } +- } +- +- public static class get_paged_slice extends org.apache.thrift.ProcessFunction { +- public get_paged_slice() { +- super("get_paged_slice"); +- } +- +- public get_paged_slice_args getEmptyArgsInstance() { +- return new get_paged_slice_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public get_paged_slice_result getResult(I iface, get_paged_slice_args args) throws org.apache.thrift.TException { +- get_paged_slice_result result = new get_paged_slice_result(); +- try { +- result.success = iface.get_paged_slice(args.column_family, args.range, args.start_column, args.consistency_level); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } catch (UnavailableException ue) { +- result.ue = ue; +- } catch (TimedOutException te) { +- result.te = te; +- } +- return result; +- } +- } +- +- public static class get_indexed_slices extends org.apache.thrift.ProcessFunction { +- public get_indexed_slices() { +- super("get_indexed_slices"); +- } +- +- public get_indexed_slices_args getEmptyArgsInstance() { +- return new get_indexed_slices_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public get_indexed_slices_result getResult(I iface, get_indexed_slices_args args) throws org.apache.thrift.TException { +- get_indexed_slices_result result = new get_indexed_slices_result(); +- try { +- result.success = iface.get_indexed_slices(args.column_parent, args.index_clause, args.column_predicate, args.consistency_level); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } 
catch (UnavailableException ue) { +- result.ue = ue; +- } catch (TimedOutException te) { +- result.te = te; +- } +- return result; +- } +- } +- +- public static class insert extends org.apache.thrift.ProcessFunction { +- public insert() { +- super("insert"); +- } +- +- public insert_args getEmptyArgsInstance() { +- return new insert_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public insert_result getResult(I iface, insert_args args) throws org.apache.thrift.TException { +- insert_result result = new insert_result(); +- try { +- iface.insert(args.key, args.column_parent, args.column, args.consistency_level); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } catch (UnavailableException ue) { +- result.ue = ue; +- } catch (TimedOutException te) { +- result.te = te; +- } +- return result; +- } +- } +- +- public static class add extends org.apache.thrift.ProcessFunction { +- public add() { +- super("add"); +- } +- +- public add_args getEmptyArgsInstance() { +- return new add_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public add_result getResult(I iface, add_args args) throws org.apache.thrift.TException { +- add_result result = new add_result(); +- try { +- iface.add(args.key, args.column_parent, args.column, args.consistency_level); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } catch (UnavailableException ue) { +- result.ue = ue; +- } catch (TimedOutException te) { +- result.te = te; +- } +- return result; +- } +- } +- +- public static class cas extends org.apache.thrift.ProcessFunction { +- public cas() { +- super("cas"); +- } +- +- public cas_args getEmptyArgsInstance() { +- return new cas_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public cas_result getResult(I iface, cas_args args) throws org.apache.thrift.TException { +- cas_result result = new cas_result(); +- try { +- result.success = iface.cas(args.key, 
args.column_family, args.expected, args.updates, args.serial_consistency_level, args.commit_consistency_level); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } catch (UnavailableException ue) { +- result.ue = ue; +- } catch (TimedOutException te) { +- result.te = te; +- } +- return result; +- } +- } +- +- public static class remove extends org.apache.thrift.ProcessFunction { +- public remove() { +- super("remove"); +- } +- +- public remove_args getEmptyArgsInstance() { +- return new remove_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public remove_result getResult(I iface, remove_args args) throws org.apache.thrift.TException { +- remove_result result = new remove_result(); +- try { +- iface.remove(args.key, args.column_path, args.timestamp, args.consistency_level); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } catch (UnavailableException ue) { +- result.ue = ue; +- } catch (TimedOutException te) { +- result.te = te; +- } +- return result; +- } +- } +- +- public static class remove_counter extends org.apache.thrift.ProcessFunction { +- public remove_counter() { +- super("remove_counter"); +- } +- +- public remove_counter_args getEmptyArgsInstance() { +- return new remove_counter_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public remove_counter_result getResult(I iface, remove_counter_args args) throws org.apache.thrift.TException { +- remove_counter_result result = new remove_counter_result(); +- try { +- iface.remove_counter(args.key, args.path, args.consistency_level); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } catch (UnavailableException ue) { +- result.ue = ue; +- } catch (TimedOutException te) { +- result.te = te; +- } +- return result; +- } +- } +- +- public static class batch_mutate extends org.apache.thrift.ProcessFunction { +- public batch_mutate() { +- super("batch_mutate"); +- } +- +- public batch_mutate_args 
getEmptyArgsInstance() { +- return new batch_mutate_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public batch_mutate_result getResult(I iface, batch_mutate_args args) throws org.apache.thrift.TException { +- batch_mutate_result result = new batch_mutate_result(); +- try { +- iface.batch_mutate(args.mutation_map, args.consistency_level); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } catch (UnavailableException ue) { +- result.ue = ue; +- } catch (TimedOutException te) { +- result.te = te; +- } +- return result; +- } +- } +- +- public static class atomic_batch_mutate extends org.apache.thrift.ProcessFunction { +- public atomic_batch_mutate() { +- super("atomic_batch_mutate"); +- } +- +- public atomic_batch_mutate_args getEmptyArgsInstance() { +- return new atomic_batch_mutate_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public atomic_batch_mutate_result getResult(I iface, atomic_batch_mutate_args args) throws org.apache.thrift.TException { +- atomic_batch_mutate_result result = new atomic_batch_mutate_result(); +- try { +- iface.atomic_batch_mutate(args.mutation_map, args.consistency_level); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } catch (UnavailableException ue) { +- result.ue = ue; +- } catch (TimedOutException te) { +- result.te = te; +- } +- return result; +- } +- } +- +- public static class truncate extends org.apache.thrift.ProcessFunction { +- public truncate() { +- super("truncate"); +- } +- +- public truncate_args getEmptyArgsInstance() { +- return new truncate_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public truncate_result getResult(I iface, truncate_args args) throws org.apache.thrift.TException { +- truncate_result result = new truncate_result(); +- try { +- iface.truncate(args.cfname); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } catch (UnavailableException ue) { +- result.ue = 
ue; +- } catch (TimedOutException te) { +- result.te = te; +- } +- return result; +- } +- } +- +- public static class get_multi_slice extends org.apache.thrift.ProcessFunction { +- public get_multi_slice() { +- super("get_multi_slice"); +- } +- +- public get_multi_slice_args getEmptyArgsInstance() { +- return new get_multi_slice_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public get_multi_slice_result getResult(I iface, get_multi_slice_args args) throws org.apache.thrift.TException { +- get_multi_slice_result result = new get_multi_slice_result(); +- try { +- result.success = iface.get_multi_slice(args.request); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } catch (UnavailableException ue) { +- result.ue = ue; +- } catch (TimedOutException te) { +- result.te = te; +- } +- return result; +- } +- } +- +- public static class describe_schema_versions extends org.apache.thrift.ProcessFunction { +- public describe_schema_versions() { +- super("describe_schema_versions"); +- } +- +- public describe_schema_versions_args getEmptyArgsInstance() { +- return new describe_schema_versions_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public describe_schema_versions_result getResult(I iface, describe_schema_versions_args args) throws org.apache.thrift.TException { +- describe_schema_versions_result result = new describe_schema_versions_result(); +- try { +- result.success = iface.describe_schema_versions(); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } +- return result; +- } +- } +- +- public static class describe_keyspaces extends org.apache.thrift.ProcessFunction { +- public describe_keyspaces() { +- super("describe_keyspaces"); +- } +- +- public describe_keyspaces_args getEmptyArgsInstance() { +- return new describe_keyspaces_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public describe_keyspaces_result getResult(I iface, 
describe_keyspaces_args args) throws org.apache.thrift.TException { +- describe_keyspaces_result result = new describe_keyspaces_result(); +- try { +- result.success = iface.describe_keyspaces(); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } +- return result; +- } +- } +- +- public static class describe_cluster_name extends org.apache.thrift.ProcessFunction { +- public describe_cluster_name() { +- super("describe_cluster_name"); +- } +- +- public describe_cluster_name_args getEmptyArgsInstance() { +- return new describe_cluster_name_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public describe_cluster_name_result getResult(I iface, describe_cluster_name_args args) throws org.apache.thrift.TException { +- describe_cluster_name_result result = new describe_cluster_name_result(); +- result.success = iface.describe_cluster_name(); +- return result; +- } +- } +- +- public static class describe_version extends org.apache.thrift.ProcessFunction { +- public describe_version() { +- super("describe_version"); +- } +- +- public describe_version_args getEmptyArgsInstance() { +- return new describe_version_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public describe_version_result getResult(I iface, describe_version_args args) throws org.apache.thrift.TException { +- describe_version_result result = new describe_version_result(); +- result.success = iface.describe_version(); +- return result; +- } +- } +- +- public static class describe_ring extends org.apache.thrift.ProcessFunction { +- public describe_ring() { +- super("describe_ring"); +- } +- +- public describe_ring_args getEmptyArgsInstance() { +- return new describe_ring_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public describe_ring_result getResult(I iface, describe_ring_args args) throws org.apache.thrift.TException { +- describe_ring_result result = new describe_ring_result(); +- try { +- 
result.success = iface.describe_ring(args.keyspace); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } +- return result; +- } +- } +- +- public static class describe_local_ring extends org.apache.thrift.ProcessFunction { +- public describe_local_ring() { +- super("describe_local_ring"); +- } +- +- public describe_local_ring_args getEmptyArgsInstance() { +- return new describe_local_ring_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public describe_local_ring_result getResult(I iface, describe_local_ring_args args) throws org.apache.thrift.TException { +- describe_local_ring_result result = new describe_local_ring_result(); +- try { +- result.success = iface.describe_local_ring(args.keyspace); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } +- return result; +- } +- } +- +- public static class describe_token_map extends org.apache.thrift.ProcessFunction { +- public describe_token_map() { +- super("describe_token_map"); +- } +- +- public describe_token_map_args getEmptyArgsInstance() { +- return new describe_token_map_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public describe_token_map_result getResult(I iface, describe_token_map_args args) throws org.apache.thrift.TException { +- describe_token_map_result result = new describe_token_map_result(); +- try { +- result.success = iface.describe_token_map(); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } +- return result; +- } +- } +- +- public static class describe_partitioner extends org.apache.thrift.ProcessFunction { +- public describe_partitioner() { +- super("describe_partitioner"); +- } +- +- public describe_partitioner_args getEmptyArgsInstance() { +- return new describe_partitioner_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public describe_partitioner_result getResult(I iface, describe_partitioner_args args) throws org.apache.thrift.TException { +- 
describe_partitioner_result result = new describe_partitioner_result(); +- result.success = iface.describe_partitioner(); +- return result; +- } +- } +- +- public static class describe_snitch extends org.apache.thrift.ProcessFunction { +- public describe_snitch() { +- super("describe_snitch"); +- } +- +- public describe_snitch_args getEmptyArgsInstance() { +- return new describe_snitch_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public describe_snitch_result getResult(I iface, describe_snitch_args args) throws org.apache.thrift.TException { +- describe_snitch_result result = new describe_snitch_result(); +- result.success = iface.describe_snitch(); +- return result; +- } +- } +- +- public static class describe_keyspace extends org.apache.thrift.ProcessFunction { +- public describe_keyspace() { +- super("describe_keyspace"); +- } +- +- public describe_keyspace_args getEmptyArgsInstance() { +- return new describe_keyspace_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public describe_keyspace_result getResult(I iface, describe_keyspace_args args) throws org.apache.thrift.TException { +- describe_keyspace_result result = new describe_keyspace_result(); +- try { +- result.success = iface.describe_keyspace(args.keyspace); +- } catch (NotFoundException nfe) { +- result.nfe = nfe; +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } +- return result; +- } +- } +- +- public static class describe_splits extends org.apache.thrift.ProcessFunction { +- public describe_splits() { +- super("describe_splits"); +- } +- +- public describe_splits_args getEmptyArgsInstance() { +- return new describe_splits_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public describe_splits_result getResult(I iface, describe_splits_args args) throws org.apache.thrift.TException { +- describe_splits_result result = new describe_splits_result(); +- try { +- result.success = 
iface.describe_splits(args.cfName, args.start_token, args.end_token, args.keys_per_split); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } +- return result; +- } +- } +- +- public static class trace_next_query extends org.apache.thrift.ProcessFunction { +- public trace_next_query() { +- super("trace_next_query"); +- } +- +- public trace_next_query_args getEmptyArgsInstance() { +- return new trace_next_query_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public trace_next_query_result getResult(I iface, trace_next_query_args args) throws org.apache.thrift.TException { +- trace_next_query_result result = new trace_next_query_result(); +- result.success = iface.trace_next_query(); +- return result; +- } +- } +- +- public static class describe_splits_ex extends org.apache.thrift.ProcessFunction { +- public describe_splits_ex() { +- super("describe_splits_ex"); +- } +- +- public describe_splits_ex_args getEmptyArgsInstance() { +- return new describe_splits_ex_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public describe_splits_ex_result getResult(I iface, describe_splits_ex_args args) throws org.apache.thrift.TException { +- describe_splits_ex_result result = new describe_splits_ex_result(); +- try { +- result.success = iface.describe_splits_ex(args.cfName, args.start_token, args.end_token, args.keys_per_split); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } +- return result; +- } +- } +- +- public static class system_add_column_family extends org.apache.thrift.ProcessFunction { +- public system_add_column_family() { +- super("system_add_column_family"); +- } +- +- public system_add_column_family_args getEmptyArgsInstance() { +- return new system_add_column_family_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public system_add_column_family_result getResult(I iface, system_add_column_family_args args) throws 
org.apache.thrift.TException { +- system_add_column_family_result result = new system_add_column_family_result(); +- try { +- result.success = iface.system_add_column_family(args.cf_def); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } catch (SchemaDisagreementException sde) { +- result.sde = sde; +- } +- return result; +- } +- } +- +- public static class system_drop_column_family extends org.apache.thrift.ProcessFunction { +- public system_drop_column_family() { +- super("system_drop_column_family"); +- } +- +- public system_drop_column_family_args getEmptyArgsInstance() { +- return new system_drop_column_family_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public system_drop_column_family_result getResult(I iface, system_drop_column_family_args args) throws org.apache.thrift.TException { +- system_drop_column_family_result result = new system_drop_column_family_result(); +- try { +- result.success = iface.system_drop_column_family(args.column_family); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } catch (SchemaDisagreementException sde) { +- result.sde = sde; +- } +- return result; +- } +- } +- +- public static class system_add_keyspace extends org.apache.thrift.ProcessFunction { +- public system_add_keyspace() { +- super("system_add_keyspace"); +- } +- +- public system_add_keyspace_args getEmptyArgsInstance() { +- return new system_add_keyspace_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public system_add_keyspace_result getResult(I iface, system_add_keyspace_args args) throws org.apache.thrift.TException { +- system_add_keyspace_result result = new system_add_keyspace_result(); +- try { +- result.success = iface.system_add_keyspace(args.ks_def); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } catch (SchemaDisagreementException sde) { +- result.sde = sde; +- } +- return result; +- } +- } +- +- public static class system_drop_keyspace 
extends org.apache.thrift.ProcessFunction { +- public system_drop_keyspace() { +- super("system_drop_keyspace"); +- } +- +- public system_drop_keyspace_args getEmptyArgsInstance() { +- return new system_drop_keyspace_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public system_drop_keyspace_result getResult(I iface, system_drop_keyspace_args args) throws org.apache.thrift.TException { +- system_drop_keyspace_result result = new system_drop_keyspace_result(); +- try { +- result.success = iface.system_drop_keyspace(args.keyspace); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } catch (SchemaDisagreementException sde) { +- result.sde = sde; +- } +- return result; +- } +- } +- +- public static class system_update_keyspace extends org.apache.thrift.ProcessFunction { +- public system_update_keyspace() { +- super("system_update_keyspace"); +- } +- +- public system_update_keyspace_args getEmptyArgsInstance() { +- return new system_update_keyspace_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public system_update_keyspace_result getResult(I iface, system_update_keyspace_args args) throws org.apache.thrift.TException { +- system_update_keyspace_result result = new system_update_keyspace_result(); +- try { +- result.success = iface.system_update_keyspace(args.ks_def); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } catch (SchemaDisagreementException sde) { +- result.sde = sde; +- } +- return result; +- } +- } +- +- public static class system_update_column_family extends org.apache.thrift.ProcessFunction { +- public system_update_column_family() { +- super("system_update_column_family"); +- } +- +- public system_update_column_family_args getEmptyArgsInstance() { +- return new system_update_column_family_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public system_update_column_family_result getResult(I iface, system_update_column_family_args 
args) throws org.apache.thrift.TException { +- system_update_column_family_result result = new system_update_column_family_result(); +- try { +- result.success = iface.system_update_column_family(args.cf_def); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } catch (SchemaDisagreementException sde) { +- result.sde = sde; +- } +- return result; +- } +- } +- +- public static class execute_cql_query extends org.apache.thrift.ProcessFunction { +- public execute_cql_query() { +- super("execute_cql_query"); +- } +- +- public execute_cql_query_args getEmptyArgsInstance() { +- return new execute_cql_query_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public execute_cql_query_result getResult(I iface, execute_cql_query_args args) throws org.apache.thrift.TException { +- execute_cql_query_result result = new execute_cql_query_result(); +- try { +- result.success = iface.execute_cql_query(args.query, args.compression); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } catch (UnavailableException ue) { +- result.ue = ue; +- } catch (TimedOutException te) { +- result.te = te; +- } catch (SchemaDisagreementException sde) { +- result.sde = sde; +- } +- return result; +- } +- } +- +- public static class execute_cql3_query extends org.apache.thrift.ProcessFunction { +- public execute_cql3_query() { +- super("execute_cql3_query"); +- } +- +- public execute_cql3_query_args getEmptyArgsInstance() { +- return new execute_cql3_query_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public execute_cql3_query_result getResult(I iface, execute_cql3_query_args args) throws org.apache.thrift.TException { +- execute_cql3_query_result result = new execute_cql3_query_result(); +- try { +- result.success = iface.execute_cql3_query(args.query, args.compression, args.consistency); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } catch (UnavailableException ue) { +- result.ue = ue; 
+- } catch (TimedOutException te) { +- result.te = te; +- } catch (SchemaDisagreementException sde) { +- result.sde = sde; +- } +- return result; +- } +- } +- +- public static class prepare_cql_query extends org.apache.thrift.ProcessFunction { +- public prepare_cql_query() { +- super("prepare_cql_query"); +- } +- +- public prepare_cql_query_args getEmptyArgsInstance() { +- return new prepare_cql_query_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public prepare_cql_query_result getResult(I iface, prepare_cql_query_args args) throws org.apache.thrift.TException { +- prepare_cql_query_result result = new prepare_cql_query_result(); +- try { +- result.success = iface.prepare_cql_query(args.query, args.compression); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } +- return result; +- } +- } +- +- public static class prepare_cql3_query extends org.apache.thrift.ProcessFunction { +- public prepare_cql3_query() { +- super("prepare_cql3_query"); +- } +- +- public prepare_cql3_query_args getEmptyArgsInstance() { +- return new prepare_cql3_query_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public prepare_cql3_query_result getResult(I iface, prepare_cql3_query_args args) throws org.apache.thrift.TException { +- prepare_cql3_query_result result = new prepare_cql3_query_result(); +- try { +- result.success = iface.prepare_cql3_query(args.query, args.compression); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } +- return result; +- } +- } +- +- public static class execute_prepared_cql_query extends org.apache.thrift.ProcessFunction { +- public execute_prepared_cql_query() { +- super("execute_prepared_cql_query"); +- } +- +- public execute_prepared_cql_query_args getEmptyArgsInstance() { +- return new execute_prepared_cql_query_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public execute_prepared_cql_query_result getResult(I iface, 
execute_prepared_cql_query_args args) throws org.apache.thrift.TException { +- execute_prepared_cql_query_result result = new execute_prepared_cql_query_result(); +- try { +- result.success = iface.execute_prepared_cql_query(args.itemId, args.values); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } catch (UnavailableException ue) { +- result.ue = ue; +- } catch (TimedOutException te) { +- result.te = te; +- } catch (SchemaDisagreementException sde) { +- result.sde = sde; +- } +- return result; +- } +- } +- +- public static class execute_prepared_cql3_query extends org.apache.thrift.ProcessFunction { +- public execute_prepared_cql3_query() { +- super("execute_prepared_cql3_query"); +- } +- +- public execute_prepared_cql3_query_args getEmptyArgsInstance() { +- return new execute_prepared_cql3_query_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public execute_prepared_cql3_query_result getResult(I iface, execute_prepared_cql3_query_args args) throws org.apache.thrift.TException { +- execute_prepared_cql3_query_result result = new execute_prepared_cql3_query_result(); +- try { +- result.success = iface.execute_prepared_cql3_query(args.itemId, args.values, args.consistency); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } catch (UnavailableException ue) { +- result.ue = ue; +- } catch (TimedOutException te) { +- result.te = te; +- } catch (SchemaDisagreementException sde) { +- result.sde = sde; +- } +- return result; +- } +- } +- +- public static class set_cql_version extends org.apache.thrift.ProcessFunction { +- public set_cql_version() { +- super("set_cql_version"); +- } +- +- public set_cql_version_args getEmptyArgsInstance() { +- return new set_cql_version_args(); +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public set_cql_version_result getResult(I iface, set_cql_version_args args) throws org.apache.thrift.TException { +- set_cql_version_result result = new 
set_cql_version_result(); +- try { +- iface.set_cql_version(args.version); +- } catch (InvalidRequestException ire) { +- result.ire = ire; +- } +- return result; +- } +- } +- +- } +- +- public static class AsyncProcessor extends org.apache.thrift.TBaseAsyncProcessor { +- private static final Logger LOGGER = LoggerFactory.getLogger(AsyncProcessor.class.getName()); +- public AsyncProcessor(I iface) { +- super(iface, getProcessMap(new HashMap>())); +- } +- +- protected AsyncProcessor(I iface, Map> processMap) { +- super(iface, getProcessMap(processMap)); +- } +- +- private static Map> getProcessMap(Map> processMap) { +- processMap.put("login", new login()); +- processMap.put("set_keyspace", new set_keyspace()); +- processMap.put("get", new get()); +- processMap.put("get_slice", new get_slice()); +- processMap.put("get_count", new get_count()); +- processMap.put("multiget_slice", new multiget_slice()); +- processMap.put("multiget_count", new multiget_count()); +- processMap.put("get_range_slices", new get_range_slices()); +- processMap.put("get_paged_slice", new get_paged_slice()); +- processMap.put("get_indexed_slices", new get_indexed_slices()); +- processMap.put("insert", new insert()); +- processMap.put("add", new add()); +- processMap.put("cas", new cas()); +- processMap.put("remove", new remove()); +- processMap.put("remove_counter", new remove_counter()); +- processMap.put("batch_mutate", new batch_mutate()); +- processMap.put("atomic_batch_mutate", new atomic_batch_mutate()); +- processMap.put("truncate", new truncate()); +- processMap.put("get_multi_slice", new get_multi_slice()); +- processMap.put("describe_schema_versions", new describe_schema_versions()); +- processMap.put("describe_keyspaces", new describe_keyspaces()); +- processMap.put("describe_cluster_name", new describe_cluster_name()); +- processMap.put("describe_version", new describe_version()); +- processMap.put("describe_ring", new describe_ring()); +- processMap.put("describe_local_ring", new 
describe_local_ring()); +- processMap.put("describe_token_map", new describe_token_map()); +- processMap.put("describe_partitioner", new describe_partitioner()); +- processMap.put("describe_snitch", new describe_snitch()); +- processMap.put("describe_keyspace", new describe_keyspace()); +- processMap.put("describe_splits", new describe_splits()); +- processMap.put("trace_next_query", new trace_next_query()); +- processMap.put("describe_splits_ex", new describe_splits_ex()); +- processMap.put("system_add_column_family", new system_add_column_family()); +- processMap.put("system_drop_column_family", new system_drop_column_family()); +- processMap.put("system_add_keyspace", new system_add_keyspace()); +- processMap.put("system_drop_keyspace", new system_drop_keyspace()); +- processMap.put("system_update_keyspace", new system_update_keyspace()); +- processMap.put("system_update_column_family", new system_update_column_family()); +- processMap.put("execute_cql_query", new execute_cql_query()); +- processMap.put("execute_cql3_query", new execute_cql3_query()); +- processMap.put("prepare_cql_query", new prepare_cql_query()); +- processMap.put("prepare_cql3_query", new prepare_cql3_query()); +- processMap.put("execute_prepared_cql_query", new execute_prepared_cql_query()); +- processMap.put("execute_prepared_cql3_query", new execute_prepared_cql3_query()); +- processMap.put("set_cql_version", new set_cql_version()); +- return processMap; +- } +- +- public static class login extends org.apache.thrift.AsyncProcessFunction { +- public login() { +- super("login"); +- } +- +- public login_args getEmptyArgsInstance() { +- return new login_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(Void o) { +- login_result result = new login_result(); +- try { +- fcall.sendResponse(fb,result, 
org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- login_result result = new login_result(); +- if (e instanceof AuthenticationException) { +- result.authnx = (AuthenticationException) e; +- result.setAuthnxIsSet(true); +- msg = result; +- } +- else if (e instanceof AuthorizationException) { +- result.authzx = (AuthorizationException) e; +- result.setAuthzxIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, login_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.login(args.auth_request,resultHandler); +- } +- } +- +- public static class set_keyspace extends org.apache.thrift.AsyncProcessFunction { +- public set_keyspace() { +- super("set_keyspace"); +- } +- +- public set_keyspace_args getEmptyArgsInstance() { +- return new set_keyspace_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(Void o) { +- set_keyspace_result result = new set_keyspace_result(); +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch 
(Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- set_keyspace_result result = new set_keyspace_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, set_keyspace_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.set_keyspace(args.keyspace,resultHandler); +- } +- } +- +- public static class get extends org.apache.thrift.AsyncProcessFunction { +- public get() { +- super("get"); +- } +- +- public get_args getEmptyArgsInstance() { +- return new get_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(ColumnOrSuperColumn o) { +- get_result result = new get_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; 
+- get_result result = new get_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else if (e instanceof NotFoundException) { +- result.nfe = (NotFoundException) e; +- result.setNfeIsSet(true); +- msg = result; +- } +- else if (e instanceof UnavailableException) { +- result.ue = (UnavailableException) e; +- result.setUeIsSet(true); +- msg = result; +- } +- else if (e instanceof TimedOutException) { +- result.te = (TimedOutException) e; +- result.setTeIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, get_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.get(args.key, args.column_path, args.consistency_level,resultHandler); +- } +- } +- +- public static class get_slice extends org.apache.thrift.AsyncProcessFunction> { +- public get_slice() { +- super("get_slice"); +- } +- +- public get_slice_args getEmptyArgsInstance() { +- return new get_slice_args(); +- } +- +- public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback>() { +- public void onComplete(List o) { +- get_slice_result result = new get_slice_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception 
writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- get_slice_result result = new get_slice_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else if (e instanceof UnavailableException) { +- result.ue = (UnavailableException) e; +- result.setUeIsSet(true); +- msg = result; +- } +- else if (e instanceof TimedOutException) { +- result.te = (TimedOutException) e; +- result.setTeIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, get_slice_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { +- iface.get_slice(args.key, args.column_parent, args.predicate, args.consistency_level,resultHandler); +- } +- } +- +- public static class get_count extends org.apache.thrift.AsyncProcessFunction { +- public get_count() { +- super("get_count"); +- } +- +- public get_count_args getEmptyArgsInstance() { +- return new get_count_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(Integer o) { +- get_count_result result = new get_count_result(); +- result.success = o; +- result.setSuccessIsSet(true); +- try { +- 
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- get_count_result result = new get_count_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else if (e instanceof UnavailableException) { +- result.ue = (UnavailableException) e; +- result.setUeIsSet(true); +- msg = result; +- } +- else if (e instanceof TimedOutException) { +- result.te = (TimedOutException) e; +- result.setTeIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, get_count_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.get_count(args.key, args.column_parent, args.predicate, args.consistency_level,resultHandler); +- } +- } +- +- public static class multiget_slice extends org.apache.thrift.AsyncProcessFunction>> { +- public multiget_slice() { +- super("multiget_slice"); +- } +- +- public multiget_slice_args getEmptyArgsInstance() { +- return new multiget_slice_args(); +- } +- +- public AsyncMethodCallback>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new 
AsyncMethodCallback>>() { +- public void onComplete(Map> o) { +- multiget_slice_result result = new multiget_slice_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- multiget_slice_result result = new multiget_slice_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else if (e instanceof UnavailableException) { +- result.ue = (UnavailableException) e; +- result.setUeIsSet(true); +- msg = result; +- } +- else if (e instanceof TimedOutException) { +- result.te = (TimedOutException) e; +- result.setTeIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, multiget_slice_args args, org.apache.thrift.async.AsyncMethodCallback>> resultHandler) throws TException { +- iface.multiget_slice(args.keys, args.column_parent, args.predicate, args.consistency_level,resultHandler); +- } +- } +- +- public static class multiget_count extends org.apache.thrift.AsyncProcessFunction> { +- public multiget_count() { +- super("multiget_count"); +- } +- +- public multiget_count_args getEmptyArgsInstance() { +- return new multiget_count_args(); +- } +- +- 
public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback>() { +- public void onComplete(Map o) { +- multiget_count_result result = new multiget_count_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- multiget_count_result result = new multiget_count_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else if (e instanceof UnavailableException) { +- result.ue = (UnavailableException) e; +- result.setUeIsSet(true); +- msg = result; +- } +- else if (e instanceof TimedOutException) { +- result.te = (TimedOutException) e; +- result.setTeIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, multiget_count_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { +- iface.multiget_count(args.keys, args.column_parent, args.predicate, args.consistency_level,resultHandler); +- } +- } +- +- public static class get_range_slices extends org.apache.thrift.AsyncProcessFunction> { +- 
public get_range_slices() { +- super("get_range_slices"); +- } +- +- public get_range_slices_args getEmptyArgsInstance() { +- return new get_range_slices_args(); +- } +- +- public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback>() { +- public void onComplete(List o) { +- get_range_slices_result result = new get_range_slices_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- get_range_slices_result result = new get_range_slices_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else if (e instanceof UnavailableException) { +- result.ue = (UnavailableException) e; +- result.setUeIsSet(true); +- msg = result; +- } +- else if (e instanceof TimedOutException) { +- result.te = (TimedOutException) e; +- result.setTeIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, get_range_slices_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { +- 
iface.get_range_slices(args.column_parent, args.predicate, args.range, args.consistency_level,resultHandler); +- } +- } +- +- public static class get_paged_slice extends org.apache.thrift.AsyncProcessFunction> { +- public get_paged_slice() { +- super("get_paged_slice"); +- } +- +- public get_paged_slice_args getEmptyArgsInstance() { +- return new get_paged_slice_args(); +- } +- +- public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback>() { +- public void onComplete(List o) { +- get_paged_slice_result result = new get_paged_slice_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- get_paged_slice_result result = new get_paged_slice_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else if (e instanceof UnavailableException) { +- result.ue = (UnavailableException) e; +- result.setUeIsSet(true); +- msg = result; +- } +- else if (e instanceof TimedOutException) { +- result.te = (TimedOutException) e; +- result.setTeIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean 
isOneway() { +- return false; +- } +- +- public void start(I iface, get_paged_slice_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { +- iface.get_paged_slice(args.column_family, args.range, args.start_column, args.consistency_level,resultHandler); +- } +- } +- +- public static class get_indexed_slices extends org.apache.thrift.AsyncProcessFunction> { +- public get_indexed_slices() { +- super("get_indexed_slices"); +- } +- +- public get_indexed_slices_args getEmptyArgsInstance() { +- return new get_indexed_slices_args(); +- } +- +- public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback>() { +- public void onComplete(List o) { +- get_indexed_slices_result result = new get_indexed_slices_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- get_indexed_slices_result result = new get_indexed_slices_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else if (e instanceof UnavailableException) { +- result.ue = (UnavailableException) e; +- result.setUeIsSet(true); +- msg = result; +- } +- else if (e instanceof TimedOutException) { +- result.te = (TimedOutException) e; +- result.setTeIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- 
fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, get_indexed_slices_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { +- iface.get_indexed_slices(args.column_parent, args.index_clause, args.column_predicate, args.consistency_level,resultHandler); +- } +- } +- +- public static class insert extends org.apache.thrift.AsyncProcessFunction { +- public insert() { +- super("insert"); +- } +- +- public insert_args getEmptyArgsInstance() { +- return new insert_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(Void o) { +- insert_result result = new insert_result(); +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- insert_result result = new insert_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else if (e instanceof UnavailableException) { +- result.ue = (UnavailableException) e; +- result.setUeIsSet(true); +- msg = result; +- } +- else if (e instanceof TimedOutException) { +- result.te = (TimedOutException) e; +- result.setTeIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new 
org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, insert_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.insert(args.key, args.column_parent, args.column, args.consistency_level,resultHandler); +- } +- } +- +- public static class add extends org.apache.thrift.AsyncProcessFunction { +- public add() { +- super("add"); +- } +- +- public add_args getEmptyArgsInstance() { +- return new add_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(Void o) { +- add_result result = new add_result(); +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- add_result result = new add_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else if (e instanceof UnavailableException) { +- result.ue = (UnavailableException) e; +- result.setUeIsSet(true); +- msg = result; +- } +- else if (e instanceof TimedOutException) { +- result.te = (TimedOutException) e; +- result.setTeIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = 
(org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, add_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.add(args.key, args.column_parent, args.column, args.consistency_level,resultHandler); +- } +- } +- +- public static class cas extends org.apache.thrift.AsyncProcessFunction { +- public cas() { +- super("cas"); +- } +- +- public cas_args getEmptyArgsInstance() { +- return new cas_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(CASResult o) { +- cas_result result = new cas_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- cas_result result = new cas_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else if (e instanceof UnavailableException) { +- result.ue = (UnavailableException) e; +- result.setUeIsSet(true); +- msg = result; +- } +- else if (e instanceof TimedOutException) { +- result.te = (TimedOutException) e; +- result.setTeIsSet(true); +- msg = result; +- } +- else +- { +- msgType = 
org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, cas_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.cas(args.key, args.column_family, args.expected, args.updates, args.serial_consistency_level, args.commit_consistency_level,resultHandler); +- } +- } +- +- public static class remove extends org.apache.thrift.AsyncProcessFunction { +- public remove() { +- super("remove"); +- } +- +- public remove_args getEmptyArgsInstance() { +- return new remove_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(Void o) { +- remove_result result = new remove_result(); +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- remove_result result = new remove_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else if (e instanceof UnavailableException) { +- result.ue = (UnavailableException) e; +- result.setUeIsSet(true); +- msg = result; +- } +- else if (e instanceof TimedOutException) { +- result.te = 
(TimedOutException) e; +- result.setTeIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, remove_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.remove(args.key, args.column_path, args.timestamp, args.consistency_level,resultHandler); +- } +- } +- +- public static class remove_counter extends org.apache.thrift.AsyncProcessFunction { +- public remove_counter() { +- super("remove_counter"); +- } +- +- public remove_counter_args getEmptyArgsInstance() { +- return new remove_counter_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(Void o) { +- remove_counter_result result = new remove_counter_result(); +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- remove_counter_result result = new remove_counter_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else if (e instanceof UnavailableException) { +- result.ue = (UnavailableException) 
e; +- result.setUeIsSet(true); +- msg = result; +- } +- else if (e instanceof TimedOutException) { +- result.te = (TimedOutException) e; +- result.setTeIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, remove_counter_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.remove_counter(args.key, args.path, args.consistency_level,resultHandler); +- } +- } +- +- public static class batch_mutate extends org.apache.thrift.AsyncProcessFunction { +- public batch_mutate() { +- super("batch_mutate"); +- } +- +- public batch_mutate_args getEmptyArgsInstance() { +- return new batch_mutate_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(Void o) { +- batch_mutate_result result = new batch_mutate_result(); +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- batch_mutate_result result = new batch_mutate_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- 
} +- else if (e instanceof UnavailableException) { +- result.ue = (UnavailableException) e; +- result.setUeIsSet(true); +- msg = result; +- } +- else if (e instanceof TimedOutException) { +- result.te = (TimedOutException) e; +- result.setTeIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, batch_mutate_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.batch_mutate(args.mutation_map, args.consistency_level,resultHandler); +- } +- } +- +- public static class atomic_batch_mutate extends org.apache.thrift.AsyncProcessFunction { +- public atomic_batch_mutate() { +- super("atomic_batch_mutate"); +- } +- +- public atomic_batch_mutate_args getEmptyArgsInstance() { +- return new atomic_batch_mutate_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(Void o) { +- atomic_batch_mutate_result result = new atomic_batch_mutate_result(); +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- atomic_batch_mutate_result result = new 
atomic_batch_mutate_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else if (e instanceof UnavailableException) { +- result.ue = (UnavailableException) e; +- result.setUeIsSet(true); +- msg = result; +- } +- else if (e instanceof TimedOutException) { +- result.te = (TimedOutException) e; +- result.setTeIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, atomic_batch_mutate_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.atomic_batch_mutate(args.mutation_map, args.consistency_level,resultHandler); +- } +- } +- +- public static class truncate extends org.apache.thrift.AsyncProcessFunction { +- public truncate() { +- super("truncate"); +- } +- +- public truncate_args getEmptyArgsInstance() { +- return new truncate_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(Void o) { +- truncate_result result = new truncate_result(); +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = 
org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- truncate_result result = new truncate_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else if (e instanceof UnavailableException) { +- result.ue = (UnavailableException) e; +- result.setUeIsSet(true); +- msg = result; +- } +- else if (e instanceof TimedOutException) { +- result.te = (TimedOutException) e; +- result.setTeIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, truncate_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.truncate(args.cfname,resultHandler); +- } +- } +- +- public static class get_multi_slice extends org.apache.thrift.AsyncProcessFunction> { +- public get_multi_slice() { +- super("get_multi_slice"); +- } +- +- public get_multi_slice_args getEmptyArgsInstance() { +- return new get_multi_slice_args(); +- } +- +- public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback>() { +- public void onComplete(List o) { +- get_multi_slice_result result = new get_multi_slice_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", 
e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- get_multi_slice_result result = new get_multi_slice_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else if (e instanceof UnavailableException) { +- result.ue = (UnavailableException) e; +- result.setUeIsSet(true); +- msg = result; +- } +- else if (e instanceof TimedOutException) { +- result.te = (TimedOutException) e; +- result.setTeIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, get_multi_slice_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { +- iface.get_multi_slice(args.request,resultHandler); +- } +- } +- +- public static class describe_schema_versions extends org.apache.thrift.AsyncProcessFunction>> { +- public describe_schema_versions() { +- super("describe_schema_versions"); +- } +- +- public describe_schema_versions_args getEmptyArgsInstance() { +- return new describe_schema_versions_args(); +- } +- +- public AsyncMethodCallback>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback>>() { +- public void onComplete(Map> o) { +- describe_schema_versions_result result = new describe_schema_versions_result(); +- result.success = o; +- try { +- 
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- describe_schema_versions_result result = new describe_schema_versions_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, describe_schema_versions_args args, org.apache.thrift.async.AsyncMethodCallback>> resultHandler) throws TException { +- iface.describe_schema_versions(resultHandler); +- } +- } +- +- public static class describe_keyspaces extends org.apache.thrift.AsyncProcessFunction> { +- public describe_keyspaces() { +- super("describe_keyspaces"); +- } +- +- public describe_keyspaces_args getEmptyArgsInstance() { +- return new describe_keyspaces_args(); +- } +- +- public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback>() { +- public void onComplete(List o) { +- describe_keyspaces_result result = new describe_keyspaces_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch 
(Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- describe_keyspaces_result result = new describe_keyspaces_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, describe_keyspaces_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { +- iface.describe_keyspaces(resultHandler); +- } +- } +- +- public static class describe_cluster_name extends org.apache.thrift.AsyncProcessFunction { +- public describe_cluster_name() { +- super("describe_cluster_name"); +- } +- +- public describe_cluster_name_args getEmptyArgsInstance() { +- return new describe_cluster_name_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(String o) { +- describe_cluster_name_result result = new describe_cluster_name_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public 
void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- describe_cluster_name_result result = new describe_cluster_name_result(); +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, describe_cluster_name_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.describe_cluster_name(resultHandler); +- } +- } +- +- public static class describe_version extends org.apache.thrift.AsyncProcessFunction { +- public describe_version() { +- super("describe_version"); +- } +- +- public describe_version_args getEmptyArgsInstance() { +- return new describe_version_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(String o) { +- describe_version_result result = new describe_version_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- describe_version_result result = new describe_version_result(); +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = 
(org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, describe_version_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.describe_version(resultHandler); +- } +- } +- +- public static class describe_ring extends org.apache.thrift.AsyncProcessFunction> { +- public describe_ring() { +- super("describe_ring"); +- } +- +- public describe_ring_args getEmptyArgsInstance() { +- return new describe_ring_args(); +- } +- +- public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback>() { +- public void onComplete(List o) { +- describe_ring_result result = new describe_ring_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- describe_ring_result result = new describe_ring_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- 
fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, describe_ring_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { +- iface.describe_ring(args.keyspace,resultHandler); +- } +- } +- +- public static class describe_local_ring extends org.apache.thrift.AsyncProcessFunction> { +- public describe_local_ring() { +- super("describe_local_ring"); +- } +- +- public describe_local_ring_args getEmptyArgsInstance() { +- return new describe_local_ring_args(); +- } +- +- public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback>() { +- public void onComplete(List o) { +- describe_local_ring_result result = new describe_local_ring_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- describe_local_ring_result result = new describe_local_ring_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to 
internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, describe_local_ring_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { +- iface.describe_local_ring(args.keyspace,resultHandler); +- } +- } +- +- public static class describe_token_map extends org.apache.thrift.AsyncProcessFunction> { +- public describe_token_map() { +- super("describe_token_map"); +- } +- +- public describe_token_map_args getEmptyArgsInstance() { +- return new describe_token_map_args(); +- } +- +- public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback>() { +- public void onComplete(Map o) { +- describe_token_map_result result = new describe_token_map_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- describe_token_map_result result = new describe_token_map_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } 
+- +- public void start(I iface, describe_token_map_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { +- iface.describe_token_map(resultHandler); +- } +- } +- +- public static class describe_partitioner extends org.apache.thrift.AsyncProcessFunction { +- public describe_partitioner() { +- super("describe_partitioner"); +- } +- +- public describe_partitioner_args getEmptyArgsInstance() { +- return new describe_partitioner_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(String o) { +- describe_partitioner_result result = new describe_partitioner_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- describe_partitioner_result result = new describe_partitioner_result(); +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, describe_partitioner_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.describe_partitioner(resultHandler); +- } +- } +- +- public static class describe_snitch extends 
org.apache.thrift.AsyncProcessFunction { +- public describe_snitch() { +- super("describe_snitch"); +- } +- +- public describe_snitch_args getEmptyArgsInstance() { +- return new describe_snitch_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(String o) { +- describe_snitch_result result = new describe_snitch_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- describe_snitch_result result = new describe_snitch_result(); +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, describe_snitch_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.describe_snitch(resultHandler); +- } +- } +- +- public static class describe_keyspace extends org.apache.thrift.AsyncProcessFunction { +- public describe_keyspace() { +- super("describe_keyspace"); +- } +- +- public describe_keyspace_args getEmptyArgsInstance() { +- return new describe_keyspace_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final 
org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(KsDef o) { +- describe_keyspace_result result = new describe_keyspace_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- describe_keyspace_result result = new describe_keyspace_result(); +- if (e instanceof NotFoundException) { +- result.nfe = (NotFoundException) e; +- result.setNfeIsSet(true); +- msg = result; +- } +- else if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, describe_keyspace_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.describe_keyspace(args.keyspace,resultHandler); +- } +- } +- +- public static class describe_splits extends org.apache.thrift.AsyncProcessFunction> { +- public describe_splits() { +- super("describe_splits"); +- } +- +- public describe_splits_args getEmptyArgsInstance() { +- return new describe_splits_args(); +- } +- +- public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final 
org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback>() { +- public void onComplete(List o) { +- describe_splits_result result = new describe_splits_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- describe_splits_result result = new describe_splits_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, describe_splits_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { +- iface.describe_splits(args.cfName, args.start_token, args.end_token, args.keys_per_split,resultHandler); +- } +- } +- +- public static class trace_next_query extends org.apache.thrift.AsyncProcessFunction { +- public trace_next_query() { +- super("trace_next_query"); +- } +- +- public trace_next_query_args getEmptyArgsInstance() { +- return new trace_next_query_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public 
void onComplete(ByteBuffer o) { +- trace_next_query_result result = new trace_next_query_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- trace_next_query_result result = new trace_next_query_result(); +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, trace_next_query_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.trace_next_query(resultHandler); +- } +- } +- +- public static class describe_splits_ex extends org.apache.thrift.AsyncProcessFunction> { +- public describe_splits_ex() { +- super("describe_splits_ex"); +- } +- +- public describe_splits_ex_args getEmptyArgsInstance() { +- return new describe_splits_ex_args(); +- } +- +- public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback>() { +- public void onComplete(List o) { +- describe_splits_ex_result result = new describe_splits_ex_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to 
internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- describe_splits_ex_result result = new describe_splits_ex_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, describe_splits_ex_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { +- iface.describe_splits_ex(args.cfName, args.start_token, args.end_token, args.keys_per_split,resultHandler); +- } +- } +- +- public static class system_add_column_family extends org.apache.thrift.AsyncProcessFunction { +- public system_add_column_family() { +- super("system_add_column_family"); +- } +- +- public system_add_column_family_args getEmptyArgsInstance() { +- return new system_add_column_family_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(String o) { +- system_add_column_family_result result = new system_add_column_family_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- 
} +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- system_add_column_family_result result = new system_add_column_family_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else if (e instanceof SchemaDisagreementException) { +- result.sde = (SchemaDisagreementException) e; +- result.setSdeIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, system_add_column_family_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.system_add_column_family(args.cf_def,resultHandler); +- } +- } +- +- public static class system_drop_column_family extends org.apache.thrift.AsyncProcessFunction { +- public system_drop_column_family() { +- super("system_drop_column_family"); +- } +- +- public system_drop_column_family_args getEmptyArgsInstance() { +- return new system_drop_column_family_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(String o) { +- system_drop_column_family_result result = new system_drop_column_family_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- 
return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- system_drop_column_family_result result = new system_drop_column_family_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else if (e instanceof SchemaDisagreementException) { +- result.sde = (SchemaDisagreementException) e; +- result.setSdeIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, system_drop_column_family_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.system_drop_column_family(args.column_family,resultHandler); +- } +- } +- +- public static class system_add_keyspace extends org.apache.thrift.AsyncProcessFunction { +- public system_add_keyspace() { +- super("system_add_keyspace"); +- } +- +- public system_add_keyspace_args getEmptyArgsInstance() { +- return new system_add_keyspace_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(String o) { +- system_add_keyspace_result result = new system_add_keyspace_result(); +- result.success = o; +- try { +- 
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- system_add_keyspace_result result = new system_add_keyspace_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else if (e instanceof SchemaDisagreementException) { +- result.sde = (SchemaDisagreementException) e; +- result.setSdeIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, system_add_keyspace_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.system_add_keyspace(args.ks_def,resultHandler); +- } +- } +- +- public static class system_drop_keyspace extends org.apache.thrift.AsyncProcessFunction { +- public system_drop_keyspace() { +- super("system_drop_keyspace"); +- } +- +- public system_drop_keyspace_args getEmptyArgsInstance() { +- return new system_drop_keyspace_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(String o) { +- system_drop_keyspace_result result = new 
system_drop_keyspace_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- system_drop_keyspace_result result = new system_drop_keyspace_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else if (e instanceof SchemaDisagreementException) { +- result.sde = (SchemaDisagreementException) e; +- result.setSdeIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, system_drop_keyspace_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.system_drop_keyspace(args.keyspace,resultHandler); +- } +- } +- +- public static class system_update_keyspace extends org.apache.thrift.AsyncProcessFunction { +- public system_update_keyspace() { +- super("system_update_keyspace"); +- } +- +- public system_update_keyspace_args getEmptyArgsInstance() { +- return new system_update_keyspace_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void 
onComplete(String o) { +- system_update_keyspace_result result = new system_update_keyspace_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- system_update_keyspace_result result = new system_update_keyspace_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else if (e instanceof SchemaDisagreementException) { +- result.sde = (SchemaDisagreementException) e; +- result.setSdeIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, system_update_keyspace_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.system_update_keyspace(args.ks_def,resultHandler); +- } +- } +- +- public static class system_update_column_family extends org.apache.thrift.AsyncProcessFunction { +- public system_update_column_family() { +- super("system_update_column_family"); +- } +- +- public system_update_column_family_args getEmptyArgsInstance() { +- return new system_update_column_family_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final 
org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(String o) { +- system_update_column_family_result result = new system_update_column_family_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- system_update_column_family_result result = new system_update_column_family_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else if (e instanceof SchemaDisagreementException) { +- result.sde = (SchemaDisagreementException) e; +- result.setSdeIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, system_update_column_family_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.system_update_column_family(args.cf_def,resultHandler); +- } +- } +- +- public static class execute_cql_query extends org.apache.thrift.AsyncProcessFunction { +- public execute_cql_query() { +- super("execute_cql_query"); +- } +- +- public execute_cql_query_args getEmptyArgsInstance() { +- return new execute_cql_query_args(); +- } +- +- public 
AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(CqlResult o) { +- execute_cql_query_result result = new execute_cql_query_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- execute_cql_query_result result = new execute_cql_query_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else if (e instanceof UnavailableException) { +- result.ue = (UnavailableException) e; +- result.setUeIsSet(true); +- msg = result; +- } +- else if (e instanceof TimedOutException) { +- result.te = (TimedOutException) e; +- result.setTeIsSet(true); +- msg = result; +- } +- else if (e instanceof SchemaDisagreementException) { +- result.sde = (SchemaDisagreementException) e; +- result.setSdeIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, execute_cql_query_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.execute_cql_query(args.query, 
args.compression,resultHandler); +- } +- } +- +- public static class execute_cql3_query extends org.apache.thrift.AsyncProcessFunction { +- public execute_cql3_query() { +- super("execute_cql3_query"); +- } +- +- public execute_cql3_query_args getEmptyArgsInstance() { +- return new execute_cql3_query_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(CqlResult o) { +- execute_cql3_query_result result = new execute_cql3_query_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- execute_cql3_query_result result = new execute_cql3_query_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else if (e instanceof UnavailableException) { +- result.ue = (UnavailableException) e; +- result.setUeIsSet(true); +- msg = result; +- } +- else if (e instanceof TimedOutException) { +- result.te = (TimedOutException) e; +- result.setTeIsSet(true); +- msg = result; +- } +- else if (e instanceof SchemaDisagreementException) { +- result.sde = (SchemaDisagreementException) e; +- result.setSdeIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- 
LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, execute_cql3_query_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.execute_cql3_query(args.query, args.compression, args.consistency,resultHandler); +- } +- } +- +- public static class prepare_cql_query extends org.apache.thrift.AsyncProcessFunction { +- public prepare_cql_query() { +- super("prepare_cql_query"); +- } +- +- public prepare_cql_query_args getEmptyArgsInstance() { +- return new prepare_cql_query_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(CqlPreparedResult o) { +- prepare_cql_query_result result = new prepare_cql_query_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- prepare_cql_query_result result = new prepare_cql_query_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- 
}; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, prepare_cql_query_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.prepare_cql_query(args.query, args.compression,resultHandler); +- } +- } +- +- public static class prepare_cql3_query extends org.apache.thrift.AsyncProcessFunction { +- public prepare_cql3_query() { +- super("prepare_cql3_query"); +- } +- +- public prepare_cql3_query_args getEmptyArgsInstance() { +- return new prepare_cql3_query_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(CqlPreparedResult o) { +- prepare_cql3_query_result result = new prepare_cql3_query_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- prepare_cql3_query_result result = new prepare_cql3_query_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, 
prepare_cql3_query_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.prepare_cql3_query(args.query, args.compression,resultHandler); +- } +- } +- +- public static class execute_prepared_cql_query extends org.apache.thrift.AsyncProcessFunction { +- public execute_prepared_cql_query() { +- super("execute_prepared_cql_query"); +- } +- +- public execute_prepared_cql_query_args getEmptyArgsInstance() { +- return new execute_prepared_cql_query_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(CqlResult o) { +- execute_prepared_cql_query_result result = new execute_prepared_cql_query_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- execute_prepared_cql_query_result result = new execute_prepared_cql_query_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else if (e instanceof UnavailableException) { +- result.ue = (UnavailableException) e; +- result.setUeIsSet(true); +- msg = result; +- } +- else if (e instanceof TimedOutException) { +- result.te = (TimedOutException) e; +- result.setTeIsSet(true); +- msg = result; +- } +- else if (e instanceof SchemaDisagreementException) { +- result.sde = (SchemaDisagreementException) e; +- result.setSdeIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new 
org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, execute_prepared_cql_query_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.execute_prepared_cql_query(args.itemId, args.values,resultHandler); +- } +- } +- +- public static class execute_prepared_cql3_query extends org.apache.thrift.AsyncProcessFunction { +- public execute_prepared_cql3_query() { +- super("execute_prepared_cql3_query"); +- } +- +- public execute_prepared_cql3_query_args getEmptyArgsInstance() { +- return new execute_prepared_cql3_query_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(CqlResult o) { +- execute_prepared_cql3_query_result result = new execute_prepared_cql3_query_result(); +- result.success = o; +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- execute_prepared_cql3_query_result result = new execute_prepared_cql3_query_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else if (e instanceof UnavailableException) { +- result.ue = (UnavailableException) e; +- result.setUeIsSet(true); +- msg = 
result; +- } +- else if (e instanceof TimedOutException) { +- result.te = (TimedOutException) e; +- result.setTeIsSet(true); +- msg = result; +- } +- else if (e instanceof SchemaDisagreementException) { +- result.sde = (SchemaDisagreementException) e; +- result.setSdeIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, execute_prepared_cql3_query_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.execute_prepared_cql3_query(args.itemId, args.values, args.consistency,resultHandler); +- } +- } +- +- public static class set_cql_version extends org.apache.thrift.AsyncProcessFunction { +- public set_cql_version() { +- super("set_cql_version"); +- } +- +- public set_cql_version_args getEmptyArgsInstance() { +- return new set_cql_version_args(); +- } +- +- public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { +- final org.apache.thrift.AsyncProcessFunction fcall = this; +- return new AsyncMethodCallback() { +- public void onComplete(Void o) { +- set_cql_version_result result = new set_cql_version_result(); +- try { +- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); +- return; +- } catch (Exception e) { +- LOGGER.error("Exception writing to internal frame buffer", e); +- } +- fb.close(); +- } +- public void onError(Exception e) { +- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; +- org.apache.thrift.TBase msg; +- set_cql_version_result result = 
new set_cql_version_result(); +- if (e instanceof InvalidRequestException) { +- result.ire = (InvalidRequestException) e; +- result.setIreIsSet(true); +- msg = result; +- } +- else +- { +- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; +- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); +- } +- try { +- fcall.sendResponse(fb,msg,msgType,seqid); +- return; +- } catch (Exception ex) { +- LOGGER.error("Exception writing to internal frame buffer", ex); +- } +- fb.close(); +- } +- }; +- } +- +- protected boolean isOneway() { +- return false; +- } +- +- public void start(I iface, set_cql_version_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { +- iface.set_cql_version(args.version,resultHandler); +- } +- } +- +- } +- +- public static class login_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("login_args"); +- +- private static final org.apache.thrift.protocol.TField AUTH_REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("auth_request", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new login_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new login_argsTupleSchemeFactory()); +- } +- +- public AuthenticationRequest auth_request; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- AUTH_REQUEST((short)1, "auth_request"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // AUTH_REQUEST +- return AUTH_REQUEST; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.AUTH_REQUEST, new org.apache.thrift.meta_data.FieldMetaData("auth_request", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AuthenticationRequest.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(login_args.class, metaDataMap); +- } +- +- public login_args() { +- } +- +- public login_args( +- AuthenticationRequest auth_request) +- { +- this(); +- this.auth_request = auth_request; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public login_args(login_args other) { +- if (other.isSetAuth_request()) { +- this.auth_request = new AuthenticationRequest(other.auth_request); +- } +- } +- +- public login_args deepCopy() { +- return new login_args(this); +- } +- +- @Override +- public void clear() { +- this.auth_request = null; +- } +- +- public AuthenticationRequest getAuth_request() { +- return this.auth_request; +- } +- +- public login_args setAuth_request(AuthenticationRequest auth_request) { +- this.auth_request = auth_request; +- return this; +- } +- +- public void unsetAuth_request() { +- this.auth_request = null; +- } +- +- /** Returns true if field auth_request is set (has been assigned a value) and false otherwise */ +- public boolean isSetAuth_request() { +- return this.auth_request != null; +- } +- +- public void setAuth_requestIsSet(boolean value) { +- if (!value) { +- this.auth_request = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case AUTH_REQUEST: +- if (value == null) { +- unsetAuth_request(); +- } else { +- setAuth_request((AuthenticationRequest)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case AUTH_REQUEST: +- return getAuth_request(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case AUTH_REQUEST: +- return isSetAuth_request(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- 
public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof login_args) +- return this.equals((login_args)that); +- return false; +- } +- +- public boolean equals(login_args that) { +- if (that == null) +- return false; +- +- boolean this_present_auth_request = true && this.isSetAuth_request(); +- boolean that_present_auth_request = true && that.isSetAuth_request(); +- if (this_present_auth_request || that_present_auth_request) { +- if (!(this_present_auth_request && that_present_auth_request)) +- return false; +- if (!this.auth_request.equals(that.auth_request)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_auth_request = true && (isSetAuth_request()); +- builder.append(present_auth_request); +- if (present_auth_request) +- builder.append(auth_request); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(login_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetAuth_request()).compareTo(other.isSetAuth_request()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetAuth_request()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.auth_request, other.auth_request); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- 
@Override +- public String toString() { +- StringBuilder sb = new StringBuilder("login_args("); +- boolean first = true; +- +- sb.append("auth_request:"); +- if (this.auth_request == null) { +- sb.append("null"); +- } else { +- sb.append(this.auth_request); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (auth_request == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'auth_request' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- if (auth_request != null) { +- auth_request.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class login_argsStandardSchemeFactory implements SchemeFactory { +- public login_argsStandardScheme getScheme() { +- return new login_argsStandardScheme(); +- } +- } +- +- private static class login_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, login_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // 
AUTH_REQUEST +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.auth_request = new AuthenticationRequest(); +- struct.auth_request.read(iprot); +- struct.setAuth_requestIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, login_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.auth_request != null) { +- oprot.writeFieldBegin(AUTH_REQUEST_FIELD_DESC); +- struct.auth_request.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class login_argsTupleSchemeFactory implements SchemeFactory { +- public login_argsTupleScheme getScheme() { +- return new login_argsTupleScheme(); +- } +- } +- +- private static class login_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, login_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- struct.auth_request.write(oprot); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, login_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.auth_request = new AuthenticationRequest(); +- struct.auth_request.read(iprot); +- struct.setAuth_requestIsSet(true); +- } +- } +- +- } +- +- public static class login_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct 
STRUCT_DESC = new org.apache.thrift.protocol.TStruct("login_result"); +- +- private static final org.apache.thrift.protocol.TField AUTHNX_FIELD_DESC = new org.apache.thrift.protocol.TField("authnx", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField AUTHZX_FIELD_DESC = new org.apache.thrift.protocol.TField("authzx", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new login_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new login_resultTupleSchemeFactory()); +- } +- +- public AuthenticationException authnx; // required +- public AuthorizationException authzx; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- AUTHNX((short)1, "authnx"), +- AUTHZX((short)2, "authzx"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // AUTHNX +- return AUTHNX; +- case 2: // AUTHZX +- return AUTHZX; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.AUTHNX, new org.apache.thrift.meta_data.FieldMetaData("authnx", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.AUTHZX, new org.apache.thrift.meta_data.FieldMetaData("authzx", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(login_result.class, metaDataMap); +- } +- +- public login_result() { +- } +- +- public login_result( +- AuthenticationException authnx, +- AuthorizationException authzx) +- { +- this(); +- this.authnx = authnx; +- this.authzx = authzx; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public login_result(login_result other) { +- if (other.isSetAuthnx()) { +- this.authnx = new AuthenticationException(other.authnx); +- } +- if (other.isSetAuthzx()) { +- this.authzx = new AuthorizationException(other.authzx); +- } +- } +- +- public login_result deepCopy() { +- return new login_result(this); +- } +- +- @Override +- public void clear() { +- this.authnx = null; +- this.authzx = null; +- } +- +- public AuthenticationException getAuthnx() { +- return this.authnx; +- } +- +- public login_result setAuthnx(AuthenticationException authnx) { +- this.authnx = authnx; +- return this; +- } +- +- public void unsetAuthnx() { +- this.authnx = null; +- } +- +- /** Returns true if field authnx is set (has been assigned a value) and false otherwise */ +- public boolean isSetAuthnx() { +- return this.authnx != null; +- } +- +- public void setAuthnxIsSet(boolean value) { +- if (!value) { +- this.authnx = null; +- } +- } +- +- public AuthorizationException getAuthzx() { +- return this.authzx; +- } +- +- public login_result setAuthzx(AuthorizationException authzx) { +- this.authzx = authzx; +- return this; +- } +- +- public void unsetAuthzx() { +- this.authzx = null; +- } +- +- /** Returns true if field authzx is set (has been assigned a value) and false otherwise */ +- public boolean isSetAuthzx() { +- return this.authzx != null; +- } +- +- public void setAuthzxIsSet(boolean value) { +- if (!value) { +- this.authzx = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case AUTHNX: +- if (value == null) { +- unsetAuthnx(); +- } else { +- setAuthnx((AuthenticationException)value); +- } +- break; +- +- case AUTHZX: +- if (value == null) { +- unsetAuthzx(); +- } else { +- setAuthzx((AuthorizationException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case AUTHNX: +- return getAuthnx(); +- +- case AUTHZX: +- return getAuthzx(); +- +- } +- throw new 
IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case AUTHNX: +- return isSetAuthnx(); +- case AUTHZX: +- return isSetAuthzx(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof login_result) +- return this.equals((login_result)that); +- return false; +- } +- +- public boolean equals(login_result that) { +- if (that == null) +- return false; +- +- boolean this_present_authnx = true && this.isSetAuthnx(); +- boolean that_present_authnx = true && that.isSetAuthnx(); +- if (this_present_authnx || that_present_authnx) { +- if (!(this_present_authnx && that_present_authnx)) +- return false; +- if (!this.authnx.equals(that.authnx)) +- return false; +- } +- +- boolean this_present_authzx = true && this.isSetAuthzx(); +- boolean that_present_authzx = true && that.isSetAuthzx(); +- if (this_present_authzx || that_present_authzx) { +- if (!(this_present_authzx && that_present_authzx)) +- return false; +- if (!this.authzx.equals(that.authzx)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_authnx = true && (isSetAuthnx()); +- builder.append(present_authnx); +- if (present_authnx) +- builder.append(authnx); +- +- boolean present_authzx = true && (isSetAuthzx()); +- builder.append(present_authzx); +- if (present_authzx) +- builder.append(authzx); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(login_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = 
Boolean.valueOf(isSetAuthnx()).compareTo(other.isSetAuthnx()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetAuthnx()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.authnx, other.authnx); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetAuthzx()).compareTo(other.isSetAuthzx()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetAuthzx()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.authzx, other.authzx); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("login_result("); +- boolean first = true; +- +- sb.append("authnx:"); +- if (this.authnx == null) { +- sb.append("null"); +- } else { +- sb.append(this.authnx); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("authzx:"); +- if (this.authzx == null) { +- sb.append("null"); +- } else { +- sb.append(this.authzx); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new 
java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class login_resultStandardSchemeFactory implements SchemeFactory { +- public login_resultStandardScheme getScheme() { +- return new login_resultStandardScheme(); +- } +- } +- +- private static class login_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, login_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // AUTHNX +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.authnx = new AuthenticationException(); +- struct.authnx.read(iprot); +- struct.setAuthnxIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // AUTHZX +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.authzx = new AuthorizationException(); +- struct.authzx.read(iprot); +- struct.setAuthzxIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, login_result struct) throws 
org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.authnx != null) { +- oprot.writeFieldBegin(AUTHNX_FIELD_DESC); +- struct.authnx.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.authzx != null) { +- oprot.writeFieldBegin(AUTHZX_FIELD_DESC); +- struct.authzx.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class login_resultTupleSchemeFactory implements SchemeFactory { +- public login_resultTupleScheme getScheme() { +- return new login_resultTupleScheme(); +- } +- } +- +- private static class login_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, login_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetAuthnx()) { +- optionals.set(0); +- } +- if (struct.isSetAuthzx()) { +- optionals.set(1); +- } +- oprot.writeBitSet(optionals, 2); +- if (struct.isSetAuthnx()) { +- struct.authnx.write(oprot); +- } +- if (struct.isSetAuthzx()) { +- struct.authzx.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, login_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(2); +- if (incoming.get(0)) { +- struct.authnx = new AuthenticationException(); +- struct.authnx.read(iprot); +- struct.setAuthnxIsSet(true); +- } +- if (incoming.get(1)) { +- struct.authzx = new AuthorizationException(); +- struct.authzx.read(iprot); +- struct.setAuthzxIsSet(true); +- } +- } +- } +- +- } +- +- public static class set_keyspace_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("set_keyspace_args"); +- +- private static final org.apache.thrift.protocol.TField KEYSPACE_FIELD_DESC = new org.apache.thrift.protocol.TField("keyspace", org.apache.thrift.protocol.TType.STRING, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new set_keyspace_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new set_keyspace_argsTupleSchemeFactory()); +- } +- +- public String keyspace; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- KEYSPACE((short)1, "keyspace"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // KEYSPACE +- return KEYSPACE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.KEYSPACE, new org.apache.thrift.meta_data.FieldMetaData("keyspace", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(set_keyspace_args.class, metaDataMap); +- } +- +- public set_keyspace_args() { +- } +- +- public set_keyspace_args( +- String keyspace) +- { +- this(); +- this.keyspace = keyspace; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public set_keyspace_args(set_keyspace_args other) { +- if (other.isSetKeyspace()) { +- this.keyspace = other.keyspace; +- } +- } +- +- public set_keyspace_args deepCopy() { +- return new set_keyspace_args(this); +- } +- +- @Override +- public void clear() { +- this.keyspace = null; +- } +- +- public String getKeyspace() { +- return this.keyspace; +- } +- +- public set_keyspace_args setKeyspace(String keyspace) { +- this.keyspace = keyspace; +- return this; +- } +- +- public void unsetKeyspace() { +- this.keyspace = null; +- } +- +- /** Returns true if field keyspace is set (has been assigned a value) and false otherwise */ +- public boolean isSetKeyspace() { +- return this.keyspace != null; +- } +- +- public void setKeyspaceIsSet(boolean value) { +- if (!value) { +- this.keyspace = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case KEYSPACE: +- if (value == null) { +- unsetKeyspace(); +- } else { +- setKeyspace((String)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case KEYSPACE: +- return getKeyspace(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case KEYSPACE: +- return isSetKeyspace(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof set_keyspace_args) +- return this.equals((set_keyspace_args)that); +- return false; +- } +- +- public boolean equals(set_keyspace_args that) { +- if (that == null) +- return false; +- +- boolean this_present_keyspace = true && this.isSetKeyspace(); +- boolean that_present_keyspace = true && that.isSetKeyspace(); +- if 
(this_present_keyspace || that_present_keyspace) { +- if (!(this_present_keyspace && that_present_keyspace)) +- return false; +- if (!this.keyspace.equals(that.keyspace)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_keyspace = true && (isSetKeyspace()); +- builder.append(present_keyspace); +- if (present_keyspace) +- builder.append(keyspace); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(set_keyspace_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetKeyspace()).compareTo(other.isSetKeyspace()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKeyspace()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.keyspace, other.keyspace); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("set_keyspace_args("); +- boolean first = true; +- +- sb.append("keyspace:"); +- if (this.keyspace == null) { +- sb.append("null"); +- } else { +- sb.append(this.keyspace); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (keyspace == null) { +- throw new 
org.apache.thrift.protocol.TProtocolException("Required field 'keyspace' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class set_keyspace_argsStandardSchemeFactory implements SchemeFactory { +- public set_keyspace_argsStandardScheme getScheme() { +- return new set_keyspace_argsStandardScheme(); +- } +- } +- +- private static class set_keyspace_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, set_keyspace_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // KEYSPACE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.keyspace = iprot.readString(); +- struct.setKeyspaceIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- 
struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, set_keyspace_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.keyspace != null) { +- oprot.writeFieldBegin(KEYSPACE_FIELD_DESC); +- oprot.writeString(struct.keyspace); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class set_keyspace_argsTupleSchemeFactory implements SchemeFactory { +- public set_keyspace_argsTupleScheme getScheme() { +- return new set_keyspace_argsTupleScheme(); +- } +- } +- +- private static class set_keyspace_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, set_keyspace_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeString(struct.keyspace); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, set_keyspace_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.keyspace = iprot.readString(); +- struct.setKeyspaceIsSet(true); +- } +- } +- +- } +- +- public static class set_keyspace_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("set_keyspace_result"); +- +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new set_keyspace_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new set_keyspace_resultTupleSchemeFactory()); +- } +- +- public InvalidRequestException ire; // 
required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- IRE((short)1, "ire"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // IRE +- return IRE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(set_keyspace_result.class, metaDataMap); +- } +- +- public set_keyspace_result() { +- } +- +- public set_keyspace_result( +- InvalidRequestException ire) +- { +- this(); +- this.ire = ire; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public set_keyspace_result(set_keyspace_result other) { +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- } +- +- public set_keyspace_result deepCopy() { +- return new set_keyspace_result(this); +- } +- +- @Override +- public void clear() { +- this.ire = null; +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public set_keyspace_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case IRE: +- return getIre(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case IRE: +- return isSetIre(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof set_keyspace_result) +- return this.equals((set_keyspace_result)that); +- return false; +- } +- +- public boolean equals(set_keyspace_result that) { +- if (that == null) +- return false; +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if 
(!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(set_keyspace_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("set_keyspace_result("); +- boolean first = true; +- +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new 
org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class set_keyspace_resultStandardSchemeFactory implements SchemeFactory { +- public set_keyspace_resultStandardScheme getScheme() { +- return new set_keyspace_resultStandardScheme(); +- } +- } +- +- private static class set_keyspace_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, set_keyspace_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, set_keyspace_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.ire 
!= null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class set_keyspace_resultTupleSchemeFactory implements SchemeFactory { +- public set_keyspace_resultTupleScheme getScheme() { +- return new set_keyspace_resultTupleScheme(); +- } +- } +- +- private static class set_keyspace_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, set_keyspace_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetIre()) { +- optionals.set(0); +- } +- oprot.writeBitSet(optionals, 1); +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, set_keyspace_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(1); +- if (incoming.get(0)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- } +- } +- +- } +- +- public static class get_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_args"); +- +- private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField COLUMN_PATH_FIELD_DESC = new org.apache.thrift.protocol.TField("column_path", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField CONSISTENCY_LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("consistency_level", 
org.apache.thrift.protocol.TType.I32, (short)3); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new get_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new get_argsTupleSchemeFactory()); +- } +- +- public ByteBuffer key; // required +- public ColumnPath column_path; // required +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel consistency_level; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- KEY((short)1, "key"), +- COLUMN_PATH((short)2, "column_path"), +- /** +- * +- * @see ConsistencyLevel +- */ +- CONSISTENCY_LEVEL((short)3, "consistency_level"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // KEY +- return KEY; +- case 2: // COLUMN_PATH +- return COLUMN_PATH; +- case 3: // CONSISTENCY_LEVEL +- return CONSISTENCY_LEVEL; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.KEY, new org.apache.thrift.meta_data.FieldMetaData("key", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.COLUMN_PATH, new org.apache.thrift.meta_data.FieldMetaData("column_path", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnPath.class))); +- tmpMap.put(_Fields.CONSISTENCY_LEVEL, new org.apache.thrift.meta_data.FieldMetaData("consistency_level", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ConsistencyLevel.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_args.class, metaDataMap); +- } +- +- public get_args() { +- this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- public get_args( +- ByteBuffer key, +- ColumnPath column_path, +- ConsistencyLevel consistency_level) +- { +- this(); +- this.key = key; +- this.column_path = column_path; +- this.consistency_level = consistency_level; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public get_args(get_args other) { +- if (other.isSetKey()) { +- this.key = org.apache.thrift.TBaseHelper.copyBinary(other.key); +-; +- } +- if (other.isSetColumn_path()) { +- this.column_path = new ColumnPath(other.column_path); +- } +- if (other.isSetConsistency_level()) { +- this.consistency_level = other.consistency_level; +- } +- } +- +- public get_args deepCopy() { +- return new get_args(this); +- } +- +- @Override +- public void clear() { +- this.key = null; +- this.column_path = null; +- this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- public byte[] getKey() { +- setKey(org.apache.thrift.TBaseHelper.rightSize(key)); +- return key == null ? null : key.array(); +- } +- +- public ByteBuffer bufferForKey() { +- return key; +- } +- +- public get_args setKey(byte[] key) { +- setKey(key == null ? (ByteBuffer)null : ByteBuffer.wrap(key)); +- return this; +- } +- +- public get_args setKey(ByteBuffer key) { +- this.key = key; +- return this; +- } +- +- public void unsetKey() { +- this.key = null; +- } +- +- /** Returns true if field key is set (has been assigned a value) and false otherwise */ +- public boolean isSetKey() { +- return this.key != null; +- } +- +- public void setKeyIsSet(boolean value) { +- if (!value) { +- this.key = null; +- } +- } +- +- public ColumnPath getColumn_path() { +- return this.column_path; +- } +- +- public get_args setColumn_path(ColumnPath column_path) { +- this.column_path = column_path; +- return this; +- } +- +- public void unsetColumn_path() { +- this.column_path = null; +- } +- +- /** Returns true if field column_path is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumn_path() { +- return this.column_path != null; +- } +- +- public void setColumn_pathIsSet(boolean value) { +- if (!value) { +- this.column_path = null; +- } +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel getConsistency_level() { +- return 
this.consistency_level; +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public get_args setConsistency_level(ConsistencyLevel consistency_level) { +- this.consistency_level = consistency_level; +- return this; +- } +- +- public void unsetConsistency_level() { +- this.consistency_level = null; +- } +- +- /** Returns true if field consistency_level is set (has been assigned a value) and false otherwise */ +- public boolean isSetConsistency_level() { +- return this.consistency_level != null; +- } +- +- public void setConsistency_levelIsSet(boolean value) { +- if (!value) { +- this.consistency_level = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case KEY: +- if (value == null) { +- unsetKey(); +- } else { +- setKey((ByteBuffer)value); +- } +- break; +- +- case COLUMN_PATH: +- if (value == null) { +- unsetColumn_path(); +- } else { +- setColumn_path((ColumnPath)value); +- } +- break; +- +- case CONSISTENCY_LEVEL: +- if (value == null) { +- unsetConsistency_level(); +- } else { +- setConsistency_level((ConsistencyLevel)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case KEY: +- return getKey(); +- +- case COLUMN_PATH: +- return getColumn_path(); +- +- case CONSISTENCY_LEVEL: +- return getConsistency_level(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case KEY: +- return isSetKey(); +- case COLUMN_PATH: +- return isSetColumn_path(); +- case CONSISTENCY_LEVEL: +- return isSetConsistency_level(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof get_args) +- return 
this.equals((get_args)that); +- return false; +- } +- +- public boolean equals(get_args that) { +- if (that == null) +- return false; +- +- boolean this_present_key = true && this.isSetKey(); +- boolean that_present_key = true && that.isSetKey(); +- if (this_present_key || that_present_key) { +- if (!(this_present_key && that_present_key)) +- return false; +- if (!this.key.equals(that.key)) +- return false; +- } +- +- boolean this_present_column_path = true && this.isSetColumn_path(); +- boolean that_present_column_path = true && that.isSetColumn_path(); +- if (this_present_column_path || that_present_column_path) { +- if (!(this_present_column_path && that_present_column_path)) +- return false; +- if (!this.column_path.equals(that.column_path)) +- return false; +- } +- +- boolean this_present_consistency_level = true && this.isSetConsistency_level(); +- boolean that_present_consistency_level = true && that.isSetConsistency_level(); +- if (this_present_consistency_level || that_present_consistency_level) { +- if (!(this_present_consistency_level && that_present_consistency_level)) +- return false; +- if (!this.consistency_level.equals(that.consistency_level)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_key = true && (isSetKey()); +- builder.append(present_key); +- if (present_key) +- builder.append(key); +- +- boolean present_column_path = true && (isSetColumn_path()); +- builder.append(present_column_path); +- if (present_column_path) +- builder.append(column_path); +- +- boolean present_consistency_level = true && (isSetConsistency_level()); +- builder.append(present_consistency_level); +- if (present_consistency_level) +- builder.append(consistency_level.getValue()); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(get_args other) { +- if (!getClass().equals(other.getClass())) { +- return 
getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetKey()).compareTo(other.isSetKey()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKey()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key, other.key); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetColumn_path()).compareTo(other.isSetColumn_path()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumn_path()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_path, other.column_path); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetConsistency_level()).compareTo(other.isSetConsistency_level()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetConsistency_level()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.consistency_level, other.consistency_level); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("get_args("); +- boolean first = true; +- +- sb.append("key:"); +- if (this.key == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.key, sb); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("column_path:"); +- if (this.column_path == null) { +- 
sb.append("null"); +- } else { +- sb.append(this.column_path); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("consistency_level:"); +- if (this.consistency_level == null) { +- sb.append("null"); +- } else { +- sb.append(this.consistency_level); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (key == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'key' was not present! Struct: " + toString()); +- } +- if (column_path == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'column_path' was not present! Struct: " + toString()); +- } +- if (consistency_level == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'consistency_level' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- if (column_path != null) { +- column_path.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class get_argsStandardSchemeFactory implements SchemeFactory { +- public get_argsStandardScheme getScheme() { +- return new get_argsStandardScheme(); +- } +- } +- +- private static class get_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol 
iprot, get_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // KEY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.key = iprot.readBinary(); +- struct.setKeyIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // COLUMN_PATH +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.column_path = new ColumnPath(); +- struct.column_path.read(iprot); +- struct.setColumn_pathIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // CONSISTENCY_LEVEL +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, get_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.key != null) { +- oprot.writeFieldBegin(KEY_FIELD_DESC); +- oprot.writeBinary(struct.key); +- oprot.writeFieldEnd(); +- } +- if (struct.column_path != null) { +- oprot.writeFieldBegin(COLUMN_PATH_FIELD_DESC); +- struct.column_path.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.consistency_level != null) { 
+- oprot.writeFieldBegin(CONSISTENCY_LEVEL_FIELD_DESC); +- oprot.writeI32(struct.consistency_level.getValue()); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class get_argsTupleSchemeFactory implements SchemeFactory { +- public get_argsTupleScheme getScheme() { +- return new get_argsTupleScheme(); +- } +- } +- +- private static class get_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, get_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeBinary(struct.key); +- struct.column_path.write(oprot); +- oprot.writeI32(struct.consistency_level.getValue()); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, get_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.key = iprot.readBinary(); +- struct.setKeyIsSet(true); +- struct.column_path = new ColumnPath(); +- struct.column_path.read(iprot); +- struct.setColumn_pathIsSet(true); +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } +- } +- +- } +- +- public static class get_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField NFE_FIELD_DESC = new org.apache.thrift.protocol.TField("nfe", 
org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField UE_FIELD_DESC = new org.apache.thrift.protocol.TField("ue", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- private static final org.apache.thrift.protocol.TField TE_FIELD_DESC = new org.apache.thrift.protocol.TField("te", org.apache.thrift.protocol.TType.STRUCT, (short)4); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new get_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new get_resultTupleSchemeFactory()); +- } +- +- public ColumnOrSuperColumn success; // required +- public InvalidRequestException ire; // required +- public NotFoundException nfe; // required +- public UnavailableException ue; // required +- public TimedOutException te; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"), +- NFE((short)2, "nfe"), +- UE((short)3, "ue"), +- TE((short)4, "te"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- case 2: // NFE +- return NFE; +- case 3: // UE +- return UE; +- case 4: // TE +- return TE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. 
+- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnOrSuperColumn.class))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.NFE, new org.apache.thrift.meta_data.FieldMetaData("nfe", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.UE, new org.apache.thrift.meta_data.FieldMetaData("ue", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.TE, new 
org.apache.thrift.meta_data.FieldMetaData("te", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_result.class, metaDataMap); +- } +- +- public get_result() { +- } +- +- public get_result( +- ColumnOrSuperColumn success, +- InvalidRequestException ire, +- NotFoundException nfe, +- UnavailableException ue, +- TimedOutException te) +- { +- this(); +- this.success = success; +- this.ire = ire; +- this.nfe = nfe; +- this.ue = ue; +- this.te = te; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public get_result(get_result other) { +- if (other.isSetSuccess()) { +- this.success = new ColumnOrSuperColumn(other.success); +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- if (other.isSetNfe()) { +- this.nfe = new NotFoundException(other.nfe); +- } +- if (other.isSetUe()) { +- this.ue = new UnavailableException(other.ue); +- } +- if (other.isSetTe()) { +- this.te = new TimedOutException(other.te); +- } +- } +- +- public get_result deepCopy() { +- return new get_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- this.nfe = null; +- this.ue = null; +- this.te = null; +- } +- +- public ColumnOrSuperColumn getSuccess() { +- return this.success; +- } +- +- public get_result setSuccess(ColumnOrSuperColumn success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return 
this.ire; +- } +- +- public get_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public NotFoundException getNfe() { +- return this.nfe; +- } +- +- public get_result setNfe(NotFoundException nfe) { +- this.nfe = nfe; +- return this; +- } +- +- public void unsetNfe() { +- this.nfe = null; +- } +- +- /** Returns true if field nfe is set (has been assigned a value) and false otherwise */ +- public boolean isSetNfe() { +- return this.nfe != null; +- } +- +- public void setNfeIsSet(boolean value) { +- if (!value) { +- this.nfe = null; +- } +- } +- +- public UnavailableException getUe() { +- return this.ue; +- } +- +- public get_result setUe(UnavailableException ue) { +- this.ue = ue; +- return this; +- } +- +- public void unsetUe() { +- this.ue = null; +- } +- +- /** Returns true if field ue is set (has been assigned a value) and false otherwise */ +- public boolean isSetUe() { +- return this.ue != null; +- } +- +- public void setUeIsSet(boolean value) { +- if (!value) { +- this.ue = null; +- } +- } +- +- public TimedOutException getTe() { +- return this.te; +- } +- +- public get_result setTe(TimedOutException te) { +- this.te = te; +- return this; +- } +- +- public void unsetTe() { +- this.te = null; +- } +- +- /** Returns true if field te is set (has been assigned a value) and false otherwise */ +- public boolean isSetTe() { +- return this.te != null; +- } +- +- public void setTeIsSet(boolean value) { +- if (!value) { +- this.te = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- 
setSuccess((ColumnOrSuperColumn)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- case NFE: +- if (value == null) { +- unsetNfe(); +- } else { +- setNfe((NotFoundException)value); +- } +- break; +- +- case UE: +- if (value == null) { +- unsetUe(); +- } else { +- setUe((UnavailableException)value); +- } +- break; +- +- case TE: +- if (value == null) { +- unsetTe(); +- } else { +- setTe((TimedOutException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- case NFE: +- return getNfe(); +- +- case UE: +- return getUe(); +- +- case TE: +- return getTe(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- case NFE: +- return isSetNfe(); +- case UE: +- return isSetUe(); +- case TE: +- return isSetTe(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof get_result) +- return this.equals((get_result)that); +- return false; +- } +- +- public boolean equals(get_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean 
that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- boolean this_present_nfe = true && this.isSetNfe(); +- boolean that_present_nfe = true && that.isSetNfe(); +- if (this_present_nfe || that_present_nfe) { +- if (!(this_present_nfe && that_present_nfe)) +- return false; +- if (!this.nfe.equals(that.nfe)) +- return false; +- } +- +- boolean this_present_ue = true && this.isSetUe(); +- boolean that_present_ue = true && that.isSetUe(); +- if (this_present_ue || that_present_ue) { +- if (!(this_present_ue && that_present_ue)) +- return false; +- if (!this.ue.equals(that.ue)) +- return false; +- } +- +- boolean this_present_te = true && this.isSetTe(); +- boolean that_present_te = true && that.isSetTe(); +- if (this_present_te || that_present_te) { +- if (!(this_present_te && that_present_te)) +- return false; +- if (!this.te.equals(that.te)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- boolean present_nfe = true && (isSetNfe()); +- builder.append(present_nfe); +- if (present_nfe) +- builder.append(nfe); +- +- boolean present_ue = true && (isSetUe()); +- builder.append(present_ue); +- if (present_ue) +- builder.append(ue); +- +- boolean present_te = true && (isSetTe()); +- builder.append(present_te); +- if (present_te) +- builder.append(te); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(get_result other) { +- if (!getClass().equals(other.getClass())) { +- return 
getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetNfe()).compareTo(other.isSetNfe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetNfe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nfe, other.nfe); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetUe()).compareTo(other.isSetUe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetUe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ue, other.ue); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetTe()).compareTo(other.isSetTe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetTe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.te, other.te); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) 
throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("get_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("nfe:"); +- if (this.nfe == null) { +- sb.append("null"); +- } else { +- sb.append(this.nfe); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ue:"); +- if (this.ue == null) { +- sb.append("null"); +- } else { +- sb.append(this.ue); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("te:"); +- if (this.te == null) { +- sb.append("null"); +- } else { +- sb.append(this.te); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- if (success != null) { +- success.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class get_resultStandardSchemeFactory implements SchemeFactory { +- 
public get_resultStandardScheme getScheme() { +- return new get_resultStandardScheme(); +- } +- } +- +- private static class get_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, get_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.success = new ColumnOrSuperColumn(); +- struct.success.read(iprot); +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // NFE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.nfe = new NotFoundException(); +- struct.nfe.read(iprot); +- struct.setNfeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // UE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // TE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- 
break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, get_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- struct.success.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.nfe != null) { +- oprot.writeFieldBegin(NFE_FIELD_DESC); +- struct.nfe.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ue != null) { +- oprot.writeFieldBegin(UE_FIELD_DESC); +- struct.ue.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.te != null) { +- oprot.writeFieldBegin(TE_FIELD_DESC); +- struct.te.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class get_resultTupleSchemeFactory implements SchemeFactory { +- public get_resultTupleScheme getScheme() { +- return new get_resultTupleScheme(); +- } +- } +- +- private static class get_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, get_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetIre()) { +- optionals.set(1); +- } +- if (struct.isSetNfe()) { +- optionals.set(2); +- } +- if (struct.isSetUe()) { +- optionals.set(3); +- } +- if (struct.isSetTe()) { +- optionals.set(4); +- } +- oprot.writeBitSet(optionals, 5); +- if 
(struct.isSetSuccess()) { +- struct.success.write(oprot); +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- if (struct.isSetNfe()) { +- struct.nfe.write(oprot); +- } +- if (struct.isSetUe()) { +- struct.ue.write(oprot); +- } +- if (struct.isSetTe()) { +- struct.te.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, get_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(5); +- if (incoming.get(0)) { +- struct.success = new ColumnOrSuperColumn(); +- struct.success.read(iprot); +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- if (incoming.get(2)) { +- struct.nfe = new NotFoundException(); +- struct.nfe.read(iprot); +- struct.setNfeIsSet(true); +- } +- if (incoming.get(3)) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } +- if (incoming.get(4)) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } +- } +- } +- +- } +- +- public static class get_slice_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_slice_args"); +- +- private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField COLUMN_PARENT_FIELD_DESC = new org.apache.thrift.protocol.TField("column_parent", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField PREDICATE_FIELD_DESC = new org.apache.thrift.protocol.TField("predicate", 
org.apache.thrift.protocol.TType.STRUCT, (short)3); +- private static final org.apache.thrift.protocol.TField CONSISTENCY_LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("consistency_level", org.apache.thrift.protocol.TType.I32, (short)4); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new get_slice_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new get_slice_argsTupleSchemeFactory()); +- } +- +- public ByteBuffer key; // required +- public ColumnParent column_parent; // required +- public SlicePredicate predicate; // required +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel consistency_level; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- KEY((short)1, "key"), +- COLUMN_PARENT((short)2, "column_parent"), +- PREDICATE((short)3, "predicate"), +- /** +- * +- * @see ConsistencyLevel +- */ +- CONSISTENCY_LEVEL((short)4, "consistency_level"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // KEY +- return KEY; +- case 2: // COLUMN_PARENT +- return COLUMN_PARENT; +- case 3: // PREDICATE +- return PREDICATE; +- case 4: // CONSISTENCY_LEVEL +- return CONSISTENCY_LEVEL; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. 
+- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.KEY, new org.apache.thrift.meta_data.FieldMetaData("key", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.COLUMN_PARENT, new org.apache.thrift.meta_data.FieldMetaData("column_parent", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnParent.class))); +- tmpMap.put(_Fields.PREDICATE, new org.apache.thrift.meta_data.FieldMetaData("predicate", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SlicePredicate.class))); +- tmpMap.put(_Fields.CONSISTENCY_LEVEL, new org.apache.thrift.meta_data.FieldMetaData("consistency_level", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new 
org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ConsistencyLevel.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_slice_args.class, metaDataMap); +- } +- +- public get_slice_args() { +- this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- public get_slice_args( +- ByteBuffer key, +- ColumnParent column_parent, +- SlicePredicate predicate, +- ConsistencyLevel consistency_level) +- { +- this(); +- this.key = key; +- this.column_parent = column_parent; +- this.predicate = predicate; +- this.consistency_level = consistency_level; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public get_slice_args(get_slice_args other) { +- if (other.isSetKey()) { +- this.key = org.apache.thrift.TBaseHelper.copyBinary(other.key); +-; +- } +- if (other.isSetColumn_parent()) { +- this.column_parent = new ColumnParent(other.column_parent); +- } +- if (other.isSetPredicate()) { +- this.predicate = new SlicePredicate(other.predicate); +- } +- if (other.isSetConsistency_level()) { +- this.consistency_level = other.consistency_level; +- } +- } +- +- public get_slice_args deepCopy() { +- return new get_slice_args(this); +- } +- +- @Override +- public void clear() { +- this.key = null; +- this.column_parent = null; +- this.predicate = null; +- this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- public byte[] getKey() { +- setKey(org.apache.thrift.TBaseHelper.rightSize(key)); +- return key == null ? null : key.array(); +- } +- +- public ByteBuffer bufferForKey() { +- return key; +- } +- +- public get_slice_args setKey(byte[] key) { +- setKey(key == null ? 
(ByteBuffer)null : ByteBuffer.wrap(key)); +- return this; +- } +- +- public get_slice_args setKey(ByteBuffer key) { +- this.key = key; +- return this; +- } +- +- public void unsetKey() { +- this.key = null; +- } +- +- /** Returns true if field key is set (has been assigned a value) and false otherwise */ +- public boolean isSetKey() { +- return this.key != null; +- } +- +- public void setKeyIsSet(boolean value) { +- if (!value) { +- this.key = null; +- } +- } +- +- public ColumnParent getColumn_parent() { +- return this.column_parent; +- } +- +- public get_slice_args setColumn_parent(ColumnParent column_parent) { +- this.column_parent = column_parent; +- return this; +- } +- +- public void unsetColumn_parent() { +- this.column_parent = null; +- } +- +- /** Returns true if field column_parent is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumn_parent() { +- return this.column_parent != null; +- } +- +- public void setColumn_parentIsSet(boolean value) { +- if (!value) { +- this.column_parent = null; +- } +- } +- +- public SlicePredicate getPredicate() { +- return this.predicate; +- } +- +- public get_slice_args setPredicate(SlicePredicate predicate) { +- this.predicate = predicate; +- return this; +- } +- +- public void unsetPredicate() { +- this.predicate = null; +- } +- +- /** Returns true if field predicate is set (has been assigned a value) and false otherwise */ +- public boolean isSetPredicate() { +- return this.predicate != null; +- } +- +- public void setPredicateIsSet(boolean value) { +- if (!value) { +- this.predicate = null; +- } +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel getConsistency_level() { +- return this.consistency_level; +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public get_slice_args setConsistency_level(ConsistencyLevel consistency_level) { +- this.consistency_level = consistency_level; +- return this; +- } +- +- public void unsetConsistency_level() { +- 
this.consistency_level = null; +- } +- +- /** Returns true if field consistency_level is set (has been assigned a value) and false otherwise */ +- public boolean isSetConsistency_level() { +- return this.consistency_level != null; +- } +- +- public void setConsistency_levelIsSet(boolean value) { +- if (!value) { +- this.consistency_level = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case KEY: +- if (value == null) { +- unsetKey(); +- } else { +- setKey((ByteBuffer)value); +- } +- break; +- +- case COLUMN_PARENT: +- if (value == null) { +- unsetColumn_parent(); +- } else { +- setColumn_parent((ColumnParent)value); +- } +- break; +- +- case PREDICATE: +- if (value == null) { +- unsetPredicate(); +- } else { +- setPredicate((SlicePredicate)value); +- } +- break; +- +- case CONSISTENCY_LEVEL: +- if (value == null) { +- unsetConsistency_level(); +- } else { +- setConsistency_level((ConsistencyLevel)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case KEY: +- return getKey(); +- +- case COLUMN_PARENT: +- return getColumn_parent(); +- +- case PREDICATE: +- return getPredicate(); +- +- case CONSISTENCY_LEVEL: +- return getConsistency_level(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case KEY: +- return isSetKey(); +- case COLUMN_PARENT: +- return isSetColumn_parent(); +- case PREDICATE: +- return isSetPredicate(); +- case CONSISTENCY_LEVEL: +- return isSetConsistency_level(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof get_slice_args) +- return this.equals((get_slice_args)that); 
+- return false; +- } +- +- public boolean equals(get_slice_args that) { +- if (that == null) +- return false; +- +- boolean this_present_key = true && this.isSetKey(); +- boolean that_present_key = true && that.isSetKey(); +- if (this_present_key || that_present_key) { +- if (!(this_present_key && that_present_key)) +- return false; +- if (!this.key.equals(that.key)) +- return false; +- } +- +- boolean this_present_column_parent = true && this.isSetColumn_parent(); +- boolean that_present_column_parent = true && that.isSetColumn_parent(); +- if (this_present_column_parent || that_present_column_parent) { +- if (!(this_present_column_parent && that_present_column_parent)) +- return false; +- if (!this.column_parent.equals(that.column_parent)) +- return false; +- } +- +- boolean this_present_predicate = true && this.isSetPredicate(); +- boolean that_present_predicate = true && that.isSetPredicate(); +- if (this_present_predicate || that_present_predicate) { +- if (!(this_present_predicate && that_present_predicate)) +- return false; +- if (!this.predicate.equals(that.predicate)) +- return false; +- } +- +- boolean this_present_consistency_level = true && this.isSetConsistency_level(); +- boolean that_present_consistency_level = true && that.isSetConsistency_level(); +- if (this_present_consistency_level || that_present_consistency_level) { +- if (!(this_present_consistency_level && that_present_consistency_level)) +- return false; +- if (!this.consistency_level.equals(that.consistency_level)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_key = true && (isSetKey()); +- builder.append(present_key); +- if (present_key) +- builder.append(key); +- +- boolean present_column_parent = true && (isSetColumn_parent()); +- builder.append(present_column_parent); +- if (present_column_parent) +- builder.append(column_parent); +- +- boolean present_predicate = true 
&& (isSetPredicate()); +- builder.append(present_predicate); +- if (present_predicate) +- builder.append(predicate); +- +- boolean present_consistency_level = true && (isSetConsistency_level()); +- builder.append(present_consistency_level); +- if (present_consistency_level) +- builder.append(consistency_level.getValue()); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(get_slice_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetKey()).compareTo(other.isSetKey()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKey()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key, other.key); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetColumn_parent()).compareTo(other.isSetColumn_parent()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumn_parent()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_parent, other.column_parent); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetPredicate()).compareTo(other.isSetPredicate()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetPredicate()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.predicate, other.predicate); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetConsistency_level()).compareTo(other.isSetConsistency_level()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetConsistency_level()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.consistency_level, other.consistency_level); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields 
fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("get_slice_args("); +- boolean first = true; +- +- sb.append("key:"); +- if (this.key == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.key, sb); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("column_parent:"); +- if (this.column_parent == null) { +- sb.append("null"); +- } else { +- sb.append(this.column_parent); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("predicate:"); +- if (this.predicate == null) { +- sb.append("null"); +- } else { +- sb.append(this.predicate); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("consistency_level:"); +- if (this.consistency_level == null) { +- sb.append("null"); +- } else { +- sb.append(this.consistency_level); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (key == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'key' was not present! Struct: " + toString()); +- } +- if (column_parent == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'column_parent' was not present! Struct: " + toString()); +- } +- if (predicate == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'predicate' was not present! 
Struct: " + toString()); +- } +- if (consistency_level == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'consistency_level' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- if (column_parent != null) { +- column_parent.validate(); +- } +- if (predicate != null) { +- predicate.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class get_slice_argsStandardSchemeFactory implements SchemeFactory { +- public get_slice_argsStandardScheme getScheme() { +- return new get_slice_argsStandardScheme(); +- } +- } +- +- private static class get_slice_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, get_slice_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // KEY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.key = iprot.readBinary(); +- struct.setKeyIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // COLUMN_PARENT +- if (schemeField.type == 
org.apache.thrift.protocol.TType.STRUCT) { +- struct.column_parent = new ColumnParent(); +- struct.column_parent.read(iprot); +- struct.setColumn_parentIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // PREDICATE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.predicate = new SlicePredicate(); +- struct.predicate.read(iprot); +- struct.setPredicateIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // CONSISTENCY_LEVEL +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, get_slice_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.key != null) { +- oprot.writeFieldBegin(KEY_FIELD_DESC); +- oprot.writeBinary(struct.key); +- oprot.writeFieldEnd(); +- } +- if (struct.column_parent != null) { +- oprot.writeFieldBegin(COLUMN_PARENT_FIELD_DESC); +- struct.column_parent.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.predicate != null) { +- oprot.writeFieldBegin(PREDICATE_FIELD_DESC); +- struct.predicate.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.consistency_level != null) { +- oprot.writeFieldBegin(CONSISTENCY_LEVEL_FIELD_DESC); +- oprot.writeI32(struct.consistency_level.getValue()); +- oprot.writeFieldEnd(); +- } +- 
oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class get_slice_argsTupleSchemeFactory implements SchemeFactory { +- public get_slice_argsTupleScheme getScheme() { +- return new get_slice_argsTupleScheme(); +- } +- } +- +- private static class get_slice_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, get_slice_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeBinary(struct.key); +- struct.column_parent.write(oprot); +- struct.predicate.write(oprot); +- oprot.writeI32(struct.consistency_level.getValue()); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, get_slice_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.key = iprot.readBinary(); +- struct.setKeyIsSet(true); +- struct.column_parent = new ColumnParent(); +- struct.column_parent.read(iprot); +- struct.setColumn_parentIsSet(true); +- struct.predicate = new SlicePredicate(); +- struct.predicate.read(iprot); +- struct.setPredicateIsSet(true); +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } +- } +- +- } +- +- public static class get_slice_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_slice_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField UE_FIELD_DESC = 
new org.apache.thrift.protocol.TField("ue", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField TE_FIELD_DESC = new org.apache.thrift.protocol.TField("te", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new get_slice_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new get_slice_resultTupleSchemeFactory()); +- } +- +- public List success; // required +- public InvalidRequestException ire; // required +- public UnavailableException ue; // required +- public TimedOutException te; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"), +- UE((short)2, "ue"), +- TE((short)3, "te"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- case 2: // UE +- return UE; +- case 3: // TE +- return TE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnOrSuperColumn.class)))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.UE, new org.apache.thrift.meta_data.FieldMetaData("ue", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.TE, new org.apache.thrift.meta_data.FieldMetaData("te", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_slice_result.class, metaDataMap); +- } +- +- public get_slice_result() { +- } +- +- public get_slice_result( +- List success, +- InvalidRequestException ire, +- 
UnavailableException ue, +- TimedOutException te) +- { +- this(); +- this.success = success; +- this.ire = ire; +- this.ue = ue; +- this.te = te; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public get_slice_result(get_slice_result other) { +- if (other.isSetSuccess()) { +- List __this__success = new ArrayList(other.success.size()); +- for (ColumnOrSuperColumn other_element : other.success) { +- __this__success.add(new ColumnOrSuperColumn(other_element)); +- } +- this.success = __this__success; +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- if (other.isSetUe()) { +- this.ue = new UnavailableException(other.ue); +- } +- if (other.isSetTe()) { +- this.te = new TimedOutException(other.te); +- } +- } +- +- public get_slice_result deepCopy() { +- return new get_slice_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- this.ue = null; +- this.te = null; +- } +- +- public int getSuccessSize() { +- return (this.success == null) ? 0 : this.success.size(); +- } +- +- public java.util.Iterator getSuccessIterator() { +- return (this.success == null) ? 
null : this.success.iterator(); +- } +- +- public void addToSuccess(ColumnOrSuperColumn elem) { +- if (this.success == null) { +- this.success = new ArrayList(); +- } +- this.success.add(elem); +- } +- +- public List getSuccess() { +- return this.success; +- } +- +- public get_slice_result setSuccess(List success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public get_slice_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public UnavailableException getUe() { +- return this.ue; +- } +- +- public get_slice_result setUe(UnavailableException ue) { +- this.ue = ue; +- return this; +- } +- +- public void unsetUe() { +- this.ue = null; +- } +- +- /** Returns true if field ue is set (has been assigned a value) and false otherwise */ +- public boolean isSetUe() { +- return this.ue != null; +- } +- +- public void setUeIsSet(boolean value) { +- if (!value) { +- this.ue = null; +- } +- } +- +- public TimedOutException getTe() { +- return this.te; +- } +- +- public get_slice_result setTe(TimedOutException te) { +- this.te = te; +- return this; +- } +- +- public void unsetTe() { +- this.te = null; +- } +- +- /** Returns true if field te is set (has been assigned a value) and false otherwise */ +- public boolean isSetTe() 
{ +- return this.te != null; +- } +- +- public void setTeIsSet(boolean value) { +- if (!value) { +- this.te = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((List)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- case UE: +- if (value == null) { +- unsetUe(); +- } else { +- setUe((UnavailableException)value); +- } +- break; +- +- case TE: +- if (value == null) { +- unsetTe(); +- } else { +- setTe((TimedOutException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- case UE: +- return getUe(); +- +- case TE: +- return getTe(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- case UE: +- return isSetUe(); +- case TE: +- return isSetTe(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof get_slice_result) +- return this.equals((get_slice_result)that); +- return false; +- } +- +- public boolean equals(get_slice_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (!this.success.equals(that.success)) 
+- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- boolean this_present_ue = true && this.isSetUe(); +- boolean that_present_ue = true && that.isSetUe(); +- if (this_present_ue || that_present_ue) { +- if (!(this_present_ue && that_present_ue)) +- return false; +- if (!this.ue.equals(that.ue)) +- return false; +- } +- +- boolean this_present_te = true && this.isSetTe(); +- boolean that_present_te = true && that.isSetTe(); +- if (this_present_te || that_present_te) { +- if (!(this_present_te && that_present_te)) +- return false; +- if (!this.te.equals(that.te)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- boolean present_ue = true && (isSetUe()); +- builder.append(present_ue); +- if (present_ue) +- builder.append(ue); +- +- boolean present_te = true && (isSetTe()); +- builder.append(present_te); +- if (present_te) +- builder.append(te); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(get_slice_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, 
other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetUe()).compareTo(other.isSetUe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetUe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ue, other.ue); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetTe()).compareTo(other.isSetTe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetTe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.te, other.te); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("get_slice_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ue:"); +- if (this.ue == null) { +- sb.append("null"); 
+- } else { +- sb.append(this.ue); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("te:"); +- if (this.te == null) { +- sb.append("null"); +- } else { +- sb.append(this.te); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class get_slice_resultStandardSchemeFactory implements SchemeFactory { +- public get_slice_resultStandardScheme getScheme() { +- return new get_slice_resultStandardScheme(); +- } +- } +- +- private static class get_slice_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, get_slice_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list224 = iprot.readListBegin(); +- struct.success = new ArrayList(_list224.size); +- for (int _i225 = 0; _i225 < _list224.size; ++_i225) +- { +- 
ColumnOrSuperColumn _elem226; +- _elem226 = new ColumnOrSuperColumn(); +- _elem226.read(iprot); +- struct.success.add(_elem226); +- } +- iprot.readListEnd(); +- } +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // UE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // TE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, get_slice_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); +- for (ColumnOrSuperColumn _iter227 : struct.success) +- { +- _iter227.write(oprot); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- if 
(struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ue != null) { +- oprot.writeFieldBegin(UE_FIELD_DESC); +- struct.ue.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.te != null) { +- oprot.writeFieldBegin(TE_FIELD_DESC); +- struct.te.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class get_slice_resultTupleSchemeFactory implements SchemeFactory { +- public get_slice_resultTupleScheme getScheme() { +- return new get_slice_resultTupleScheme(); +- } +- } +- +- private static class get_slice_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, get_slice_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetIre()) { +- optionals.set(1); +- } +- if (struct.isSetUe()) { +- optionals.set(2); +- } +- if (struct.isSetTe()) { +- optionals.set(3); +- } +- oprot.writeBitSet(optionals, 4); +- if (struct.isSetSuccess()) { +- { +- oprot.writeI32(struct.success.size()); +- for (ColumnOrSuperColumn _iter228 : struct.success) +- { +- _iter228.write(oprot); +- } +- } +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- if (struct.isSetUe()) { +- struct.ue.write(oprot); +- } +- if (struct.isSetTe()) { +- struct.te.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, get_slice_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(4); +- if (incoming.get(0)) { +- { +- org.apache.thrift.protocol.TList _list229 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); +- struct.success = new 
ArrayList(_list229.size); +- for (int _i230 = 0; _i230 < _list229.size; ++_i230) +- { +- ColumnOrSuperColumn _elem231; +- _elem231 = new ColumnOrSuperColumn(); +- _elem231.read(iprot); +- struct.success.add(_elem231); +- } +- } +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- if (incoming.get(2)) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } +- if (incoming.get(3)) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } +- } +- } +- +- } +- +- public static class get_count_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_count_args"); +- +- private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField COLUMN_PARENT_FIELD_DESC = new org.apache.thrift.protocol.TField("column_parent", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField PREDICATE_FIELD_DESC = new org.apache.thrift.protocol.TField("predicate", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- private static final org.apache.thrift.protocol.TField CONSISTENCY_LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("consistency_level", org.apache.thrift.protocol.TType.I32, (short)4); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new get_count_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new get_count_argsTupleSchemeFactory()); +- } +- +- public ByteBuffer key; // required +- public ColumnParent 
column_parent; // required +- public SlicePredicate predicate; // required +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel consistency_level; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- KEY((short)1, "key"), +- COLUMN_PARENT((short)2, "column_parent"), +- PREDICATE((short)3, "predicate"), +- /** +- * +- * @see ConsistencyLevel +- */ +- CONSISTENCY_LEVEL((short)4, "consistency_level"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // KEY +- return KEY; +- case 2: // COLUMN_PARENT +- return COLUMN_PARENT; +- case 3: // PREDICATE +- return PREDICATE; +- case 4: // CONSISTENCY_LEVEL +- return CONSISTENCY_LEVEL; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.KEY, new org.apache.thrift.meta_data.FieldMetaData("key", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.COLUMN_PARENT, new org.apache.thrift.meta_data.FieldMetaData("column_parent", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnParent.class))); +- tmpMap.put(_Fields.PREDICATE, new org.apache.thrift.meta_data.FieldMetaData("predicate", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SlicePredicate.class))); +- tmpMap.put(_Fields.CONSISTENCY_LEVEL, new org.apache.thrift.meta_data.FieldMetaData("consistency_level", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ConsistencyLevel.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_count_args.class, metaDataMap); +- } +- +- public get_count_args() { +- this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- 
+- public get_count_args( +- ByteBuffer key, +- ColumnParent column_parent, +- SlicePredicate predicate, +- ConsistencyLevel consistency_level) +- { +- this(); +- this.key = key; +- this.column_parent = column_parent; +- this.predicate = predicate; +- this.consistency_level = consistency_level; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public get_count_args(get_count_args other) { +- if (other.isSetKey()) { +- this.key = org.apache.thrift.TBaseHelper.copyBinary(other.key); +-; +- } +- if (other.isSetColumn_parent()) { +- this.column_parent = new ColumnParent(other.column_parent); +- } +- if (other.isSetPredicate()) { +- this.predicate = new SlicePredicate(other.predicate); +- } +- if (other.isSetConsistency_level()) { +- this.consistency_level = other.consistency_level; +- } +- } +- +- public get_count_args deepCopy() { +- return new get_count_args(this); +- } +- +- @Override +- public void clear() { +- this.key = null; +- this.column_parent = null; +- this.predicate = null; +- this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- public byte[] getKey() { +- setKey(org.apache.thrift.TBaseHelper.rightSize(key)); +- return key == null ? null : key.array(); +- } +- +- public ByteBuffer bufferForKey() { +- return key; +- } +- +- public get_count_args setKey(byte[] key) { +- setKey(key == null ? 
(ByteBuffer)null : ByteBuffer.wrap(key)); +- return this; +- } +- +- public get_count_args setKey(ByteBuffer key) { +- this.key = key; +- return this; +- } +- +- public void unsetKey() { +- this.key = null; +- } +- +- /** Returns true if field key is set (has been assigned a value) and false otherwise */ +- public boolean isSetKey() { +- return this.key != null; +- } +- +- public void setKeyIsSet(boolean value) { +- if (!value) { +- this.key = null; +- } +- } +- +- public ColumnParent getColumn_parent() { +- return this.column_parent; +- } +- +- public get_count_args setColumn_parent(ColumnParent column_parent) { +- this.column_parent = column_parent; +- return this; +- } +- +- public void unsetColumn_parent() { +- this.column_parent = null; +- } +- +- /** Returns true if field column_parent is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumn_parent() { +- return this.column_parent != null; +- } +- +- public void setColumn_parentIsSet(boolean value) { +- if (!value) { +- this.column_parent = null; +- } +- } +- +- public SlicePredicate getPredicate() { +- return this.predicate; +- } +- +- public get_count_args setPredicate(SlicePredicate predicate) { +- this.predicate = predicate; +- return this; +- } +- +- public void unsetPredicate() { +- this.predicate = null; +- } +- +- /** Returns true if field predicate is set (has been assigned a value) and false otherwise */ +- public boolean isSetPredicate() { +- return this.predicate != null; +- } +- +- public void setPredicateIsSet(boolean value) { +- if (!value) { +- this.predicate = null; +- } +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel getConsistency_level() { +- return this.consistency_level; +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public get_count_args setConsistency_level(ConsistencyLevel consistency_level) { +- this.consistency_level = consistency_level; +- return this; +- } +- +- public void unsetConsistency_level() { +- 
this.consistency_level = null; +- } +- +- /** Returns true if field consistency_level is set (has been assigned a value) and false otherwise */ +- public boolean isSetConsistency_level() { +- return this.consistency_level != null; +- } +- +- public void setConsistency_levelIsSet(boolean value) { +- if (!value) { +- this.consistency_level = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case KEY: +- if (value == null) { +- unsetKey(); +- } else { +- setKey((ByteBuffer)value); +- } +- break; +- +- case COLUMN_PARENT: +- if (value == null) { +- unsetColumn_parent(); +- } else { +- setColumn_parent((ColumnParent)value); +- } +- break; +- +- case PREDICATE: +- if (value == null) { +- unsetPredicate(); +- } else { +- setPredicate((SlicePredicate)value); +- } +- break; +- +- case CONSISTENCY_LEVEL: +- if (value == null) { +- unsetConsistency_level(); +- } else { +- setConsistency_level((ConsistencyLevel)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case KEY: +- return getKey(); +- +- case COLUMN_PARENT: +- return getColumn_parent(); +- +- case PREDICATE: +- return getPredicate(); +- +- case CONSISTENCY_LEVEL: +- return getConsistency_level(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case KEY: +- return isSetKey(); +- case COLUMN_PARENT: +- return isSetColumn_parent(); +- case PREDICATE: +- return isSetPredicate(); +- case CONSISTENCY_LEVEL: +- return isSetConsistency_level(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof get_count_args) +- return this.equals((get_count_args)that); 
+- return false; +- } +- +- public boolean equals(get_count_args that) { +- if (that == null) +- return false; +- +- boolean this_present_key = true && this.isSetKey(); +- boolean that_present_key = true && that.isSetKey(); +- if (this_present_key || that_present_key) { +- if (!(this_present_key && that_present_key)) +- return false; +- if (!this.key.equals(that.key)) +- return false; +- } +- +- boolean this_present_column_parent = true && this.isSetColumn_parent(); +- boolean that_present_column_parent = true && that.isSetColumn_parent(); +- if (this_present_column_parent || that_present_column_parent) { +- if (!(this_present_column_parent && that_present_column_parent)) +- return false; +- if (!this.column_parent.equals(that.column_parent)) +- return false; +- } +- +- boolean this_present_predicate = true && this.isSetPredicate(); +- boolean that_present_predicate = true && that.isSetPredicate(); +- if (this_present_predicate || that_present_predicate) { +- if (!(this_present_predicate && that_present_predicate)) +- return false; +- if (!this.predicate.equals(that.predicate)) +- return false; +- } +- +- boolean this_present_consistency_level = true && this.isSetConsistency_level(); +- boolean that_present_consistency_level = true && that.isSetConsistency_level(); +- if (this_present_consistency_level || that_present_consistency_level) { +- if (!(this_present_consistency_level && that_present_consistency_level)) +- return false; +- if (!this.consistency_level.equals(that.consistency_level)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_key = true && (isSetKey()); +- builder.append(present_key); +- if (present_key) +- builder.append(key); +- +- boolean present_column_parent = true && (isSetColumn_parent()); +- builder.append(present_column_parent); +- if (present_column_parent) +- builder.append(column_parent); +- +- boolean present_predicate = true 
&& (isSetPredicate()); +- builder.append(present_predicate); +- if (present_predicate) +- builder.append(predicate); +- +- boolean present_consistency_level = true && (isSetConsistency_level()); +- builder.append(present_consistency_level); +- if (present_consistency_level) +- builder.append(consistency_level.getValue()); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(get_count_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetKey()).compareTo(other.isSetKey()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKey()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key, other.key); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetColumn_parent()).compareTo(other.isSetColumn_parent()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumn_parent()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_parent, other.column_parent); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetPredicate()).compareTo(other.isSetPredicate()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetPredicate()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.predicate, other.predicate); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetConsistency_level()).compareTo(other.isSetConsistency_level()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetConsistency_level()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.consistency_level, other.consistency_level); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields 
fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("get_count_args("); +- boolean first = true; +- +- sb.append("key:"); +- if (this.key == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.key, sb); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("column_parent:"); +- if (this.column_parent == null) { +- sb.append("null"); +- } else { +- sb.append(this.column_parent); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("predicate:"); +- if (this.predicate == null) { +- sb.append("null"); +- } else { +- sb.append(this.predicate); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("consistency_level:"); +- if (this.consistency_level == null) { +- sb.append("null"); +- } else { +- sb.append(this.consistency_level); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (key == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'key' was not present! Struct: " + toString()); +- } +- if (column_parent == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'column_parent' was not present! Struct: " + toString()); +- } +- if (predicate == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'predicate' was not present! 
Struct: " + toString()); +- } +- if (consistency_level == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'consistency_level' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- if (column_parent != null) { +- column_parent.validate(); +- } +- if (predicate != null) { +- predicate.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class get_count_argsStandardSchemeFactory implements SchemeFactory { +- public get_count_argsStandardScheme getScheme() { +- return new get_count_argsStandardScheme(); +- } +- } +- +- private static class get_count_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, get_count_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // KEY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.key = iprot.readBinary(); +- struct.setKeyIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // COLUMN_PARENT +- if (schemeField.type == 
org.apache.thrift.protocol.TType.STRUCT) { +- struct.column_parent = new ColumnParent(); +- struct.column_parent.read(iprot); +- struct.setColumn_parentIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // PREDICATE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.predicate = new SlicePredicate(); +- struct.predicate.read(iprot); +- struct.setPredicateIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // CONSISTENCY_LEVEL +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, get_count_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.key != null) { +- oprot.writeFieldBegin(KEY_FIELD_DESC); +- oprot.writeBinary(struct.key); +- oprot.writeFieldEnd(); +- } +- if (struct.column_parent != null) { +- oprot.writeFieldBegin(COLUMN_PARENT_FIELD_DESC); +- struct.column_parent.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.predicate != null) { +- oprot.writeFieldBegin(PREDICATE_FIELD_DESC); +- struct.predicate.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.consistency_level != null) { +- oprot.writeFieldBegin(CONSISTENCY_LEVEL_FIELD_DESC); +- oprot.writeI32(struct.consistency_level.getValue()); +- oprot.writeFieldEnd(); +- } +- 
oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class get_count_argsTupleSchemeFactory implements SchemeFactory { +- public get_count_argsTupleScheme getScheme() { +- return new get_count_argsTupleScheme(); +- } +- } +- +- private static class get_count_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, get_count_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeBinary(struct.key); +- struct.column_parent.write(oprot); +- struct.predicate.write(oprot); +- oprot.writeI32(struct.consistency_level.getValue()); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, get_count_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.key = iprot.readBinary(); +- struct.setKeyIsSet(true); +- struct.column_parent = new ColumnParent(); +- struct.column_parent.read(iprot); +- struct.setColumn_parentIsSet(true); +- struct.predicate = new SlicePredicate(); +- struct.predicate.read(iprot); +- struct.setPredicateIsSet(true); +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } +- } +- +- } +- +- public static class get_count_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_count_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I32, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField UE_FIELD_DESC = new 
org.apache.thrift.protocol.TField("ue", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField TE_FIELD_DESC = new org.apache.thrift.protocol.TField("te", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new get_count_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new get_count_resultTupleSchemeFactory()); +- } +- +- public int success; // required +- public InvalidRequestException ire; // required +- public UnavailableException ue; // required +- public TimedOutException te; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"), +- UE((short)2, "ue"), +- TE((short)3, "te"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- case 2: // UE +- return UE; +- case 3: // TE +- return TE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private static final int __SUCCESS_ISSET_ID = 0; +- private byte __isset_bitfield = 0; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.UE, new org.apache.thrift.meta_data.FieldMetaData("ue", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.TE, new org.apache.thrift.meta_data.FieldMetaData("te", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_count_result.class, metaDataMap); +- } +- +- public get_count_result() { +- } +- +- public get_count_result( +- int success, +- InvalidRequestException ire, +- UnavailableException ue, +- 
TimedOutException te) +- { +- this(); +- this.success = success; +- setSuccessIsSet(true); +- this.ire = ire; +- this.ue = ue; +- this.te = te; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public get_count_result(get_count_result other) { +- __isset_bitfield = other.__isset_bitfield; +- this.success = other.success; +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- if (other.isSetUe()) { +- this.ue = new UnavailableException(other.ue); +- } +- if (other.isSetTe()) { +- this.te = new TimedOutException(other.te); +- } +- } +- +- public get_count_result deepCopy() { +- return new get_count_result(this); +- } +- +- @Override +- public void clear() { +- setSuccessIsSet(false); +- this.success = 0; +- this.ire = null; +- this.ue = null; +- this.te = null; +- } +- +- public int getSuccess() { +- return this.success; +- } +- +- public get_count_result setSuccess(int success) { +- this.success = success; +- setSuccessIsSet(true); +- return this; +- } +- +- public void unsetSuccess() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID); +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return EncodingUtils.testBit(__isset_bitfield, __SUCCESS_ISSET_ID); +- } +- +- public void setSuccessIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value); +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public get_count_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public 
UnavailableException getUe() { +- return this.ue; +- } +- +- public get_count_result setUe(UnavailableException ue) { +- this.ue = ue; +- return this; +- } +- +- public void unsetUe() { +- this.ue = null; +- } +- +- /** Returns true if field ue is set (has been assigned a value) and false otherwise */ +- public boolean isSetUe() { +- return this.ue != null; +- } +- +- public void setUeIsSet(boolean value) { +- if (!value) { +- this.ue = null; +- } +- } +- +- public TimedOutException getTe() { +- return this.te; +- } +- +- public get_count_result setTe(TimedOutException te) { +- this.te = te; +- return this; +- } +- +- public void unsetTe() { +- this.te = null; +- } +- +- /** Returns true if field te is set (has been assigned a value) and false otherwise */ +- public boolean isSetTe() { +- return this.te != null; +- } +- +- public void setTeIsSet(boolean value) { +- if (!value) { +- this.te = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((Integer)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- case UE: +- if (value == null) { +- unsetUe(); +- } else { +- setUe((UnavailableException)value); +- } +- break; +- +- case TE: +- if (value == null) { +- unsetTe(); +- } else { +- setTe((TimedOutException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return Integer.valueOf(getSuccess()); +- +- case IRE: +- return getIre(); +- +- case UE: +- return getUe(); +- +- case TE: +- return getTe(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); 
+- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- case UE: +- return isSetUe(); +- case TE: +- return isSetTe(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof get_count_result) +- return this.equals((get_count_result)that); +- return false; +- } +- +- public boolean equals(get_count_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true; +- boolean that_present_success = true; +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (this.success != that.success) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- boolean this_present_ue = true && this.isSetUe(); +- boolean that_present_ue = true && that.isSetUe(); +- if (this_present_ue || that_present_ue) { +- if (!(this_present_ue && that_present_ue)) +- return false; +- if (!this.ue.equals(that.ue)) +- return false; +- } +- +- boolean this_present_te = true && this.isSetTe(); +- boolean that_present_te = true && that.isSetTe(); +- if (this_present_te || that_present_te) { +- if (!(this_present_te && that_present_te)) +- return false; +- if (!this.te.equals(that.te)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true; +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- boolean 
present_ue = true && (isSetUe()); +- builder.append(present_ue); +- if (present_ue) +- builder.append(ue); +- +- boolean present_te = true && (isSetTe()); +- builder.append(present_te); +- if (present_te) +- builder.append(te); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(get_count_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetUe()).compareTo(other.isSetUe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetUe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ue, other.ue); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetTe()).compareTo(other.isSetTe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetTe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.te, other.te); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- 
schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("get_count_result("); +- boolean first = true; +- +- sb.append("success:"); +- sb.append(this.success); +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ue:"); +- if (this.ue == null) { +- sb.append("null"); +- } else { +- sb.append(this.ue); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("te:"); +- if (this.te == null) { +- sb.append("null"); +- } else { +- sb.append(this.te); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+- __isset_bitfield = 0; +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class get_count_resultStandardSchemeFactory implements SchemeFactory { +- public get_count_resultStandardScheme getScheme() { +- return new get_count_resultStandardScheme(); +- } +- } +- +- private static class get_count_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, get_count_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.success = iprot.readI32(); +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // UE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // TE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, get_count_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.isSetSuccess()) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- oprot.writeI32(struct.success); +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ue != null) { +- oprot.writeFieldBegin(UE_FIELD_DESC); +- struct.ue.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.te != null) { +- oprot.writeFieldBegin(TE_FIELD_DESC); +- struct.te.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class get_count_resultTupleSchemeFactory implements SchemeFactory { +- public get_count_resultTupleScheme getScheme() { +- return new get_count_resultTupleScheme(); +- } +- } +- +- private static class get_count_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, get_count_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetIre()) { +- optionals.set(1); +- } +- if (struct.isSetUe()) { +- optionals.set(2); +- } +- if (struct.isSetTe()) { +- optionals.set(3); +- } +- oprot.writeBitSet(optionals, 4); +- if (struct.isSetSuccess()) { +- oprot.writeI32(struct.success); +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- if 
(struct.isSetUe()) { +- struct.ue.write(oprot); +- } +- if (struct.isSetTe()) { +- struct.te.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, get_count_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(4); +- if (incoming.get(0)) { +- struct.success = iprot.readI32(); +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- if (incoming.get(2)) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } +- if (incoming.get(3)) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } +- } +- } +- +- } +- +- public static class multiget_slice_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("multiget_slice_args"); +- +- private static final org.apache.thrift.protocol.TField KEYS_FIELD_DESC = new org.apache.thrift.protocol.TField("keys", org.apache.thrift.protocol.TType.LIST, (short)1); +- private static final org.apache.thrift.protocol.TField COLUMN_PARENT_FIELD_DESC = new org.apache.thrift.protocol.TField("column_parent", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField PREDICATE_FIELD_DESC = new org.apache.thrift.protocol.TField("predicate", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- private static final org.apache.thrift.protocol.TField CONSISTENCY_LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("consistency_level", org.apache.thrift.protocol.TType.I32, (short)4); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- 
schemes.put(StandardScheme.class, new multiget_slice_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new multiget_slice_argsTupleSchemeFactory()); +- } +- +- public List keys; // required +- public ColumnParent column_parent; // required +- public SlicePredicate predicate; // required +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel consistency_level; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- KEYS((short)1, "keys"), +- COLUMN_PARENT((short)2, "column_parent"), +- PREDICATE((short)3, "predicate"), +- /** +- * +- * @see ConsistencyLevel +- */ +- CONSISTENCY_LEVEL((short)4, "consistency_level"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // KEYS +- return KEYS; +- case 2: // COLUMN_PARENT +- return COLUMN_PARENT; +- case 3: // PREDICATE +- return PREDICATE; +- case 4: // CONSISTENCY_LEVEL +- return CONSISTENCY_LEVEL; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.KEYS, new org.apache.thrift.meta_data.FieldMetaData("keys", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true)))); +- tmpMap.put(_Fields.COLUMN_PARENT, new org.apache.thrift.meta_data.FieldMetaData("column_parent", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnParent.class))); +- tmpMap.put(_Fields.PREDICATE, new org.apache.thrift.meta_data.FieldMetaData("predicate", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SlicePredicate.class))); +- tmpMap.put(_Fields.CONSISTENCY_LEVEL, new org.apache.thrift.meta_data.FieldMetaData("consistency_level", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ConsistencyLevel.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(multiget_slice_args.class, metaDataMap); +- } +- +- public 
multiget_slice_args() { +- this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- public multiget_slice_args( +- List keys, +- ColumnParent column_parent, +- SlicePredicate predicate, +- ConsistencyLevel consistency_level) +- { +- this(); +- this.keys = keys; +- this.column_parent = column_parent; +- this.predicate = predicate; +- this.consistency_level = consistency_level; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public multiget_slice_args(multiget_slice_args other) { +- if (other.isSetKeys()) { +- List __this__keys = new ArrayList(other.keys); +- this.keys = __this__keys; +- } +- if (other.isSetColumn_parent()) { +- this.column_parent = new ColumnParent(other.column_parent); +- } +- if (other.isSetPredicate()) { +- this.predicate = new SlicePredicate(other.predicate); +- } +- if (other.isSetConsistency_level()) { +- this.consistency_level = other.consistency_level; +- } +- } +- +- public multiget_slice_args deepCopy() { +- return new multiget_slice_args(this); +- } +- +- @Override +- public void clear() { +- this.keys = null; +- this.column_parent = null; +- this.predicate = null; +- this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- public int getKeysSize() { +- return (this.keys == null) ? 0 : this.keys.size(); +- } +- +- public java.util.Iterator getKeysIterator() { +- return (this.keys == null) ? 
null : this.keys.iterator(); +- } +- +- public void addToKeys(ByteBuffer elem) { +- if (this.keys == null) { +- this.keys = new ArrayList(); +- } +- this.keys.add(elem); +- } +- +- public List getKeys() { +- return this.keys; +- } +- +- public multiget_slice_args setKeys(List keys) { +- this.keys = keys; +- return this; +- } +- +- public void unsetKeys() { +- this.keys = null; +- } +- +- /** Returns true if field keys is set (has been assigned a value) and false otherwise */ +- public boolean isSetKeys() { +- return this.keys != null; +- } +- +- public void setKeysIsSet(boolean value) { +- if (!value) { +- this.keys = null; +- } +- } +- +- public ColumnParent getColumn_parent() { +- return this.column_parent; +- } +- +- public multiget_slice_args setColumn_parent(ColumnParent column_parent) { +- this.column_parent = column_parent; +- return this; +- } +- +- public void unsetColumn_parent() { +- this.column_parent = null; +- } +- +- /** Returns true if field column_parent is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumn_parent() { +- return this.column_parent != null; +- } +- +- public void setColumn_parentIsSet(boolean value) { +- if (!value) { +- this.column_parent = null; +- } +- } +- +- public SlicePredicate getPredicate() { +- return this.predicate; +- } +- +- public multiget_slice_args setPredicate(SlicePredicate predicate) { +- this.predicate = predicate; +- return this; +- } +- +- public void unsetPredicate() { +- this.predicate = null; +- } +- +- /** Returns true if field predicate is set (has been assigned a value) and false otherwise */ +- public boolean isSetPredicate() { +- return this.predicate != null; +- } +- +- public void setPredicateIsSet(boolean value) { +- if (!value) { +- this.predicate = null; +- } +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel getConsistency_level() { +- return this.consistency_level; +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public 
multiget_slice_args setConsistency_level(ConsistencyLevel consistency_level) { +- this.consistency_level = consistency_level; +- return this; +- } +- +- public void unsetConsistency_level() { +- this.consistency_level = null; +- } +- +- /** Returns true if field consistency_level is set (has been assigned a value) and false otherwise */ +- public boolean isSetConsistency_level() { +- return this.consistency_level != null; +- } +- +- public void setConsistency_levelIsSet(boolean value) { +- if (!value) { +- this.consistency_level = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case KEYS: +- if (value == null) { +- unsetKeys(); +- } else { +- setKeys((List)value); +- } +- break; +- +- case COLUMN_PARENT: +- if (value == null) { +- unsetColumn_parent(); +- } else { +- setColumn_parent((ColumnParent)value); +- } +- break; +- +- case PREDICATE: +- if (value == null) { +- unsetPredicate(); +- } else { +- setPredicate((SlicePredicate)value); +- } +- break; +- +- case CONSISTENCY_LEVEL: +- if (value == null) { +- unsetConsistency_level(); +- } else { +- setConsistency_level((ConsistencyLevel)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case KEYS: +- return getKeys(); +- +- case COLUMN_PARENT: +- return getColumn_parent(); +- +- case PREDICATE: +- return getPredicate(); +- +- case CONSISTENCY_LEVEL: +- return getConsistency_level(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case KEYS: +- return isSetKeys(); +- case COLUMN_PARENT: +- return isSetColumn_parent(); +- case PREDICATE: +- return isSetPredicate(); +- case CONSISTENCY_LEVEL: +- return isSetConsistency_level(); +- } +- throw new 
IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof multiget_slice_args) +- return this.equals((multiget_slice_args)that); +- return false; +- } +- +- public boolean equals(multiget_slice_args that) { +- if (that == null) +- return false; +- +- boolean this_present_keys = true && this.isSetKeys(); +- boolean that_present_keys = true && that.isSetKeys(); +- if (this_present_keys || that_present_keys) { +- if (!(this_present_keys && that_present_keys)) +- return false; +- if (!this.keys.equals(that.keys)) +- return false; +- } +- +- boolean this_present_column_parent = true && this.isSetColumn_parent(); +- boolean that_present_column_parent = true && that.isSetColumn_parent(); +- if (this_present_column_parent || that_present_column_parent) { +- if (!(this_present_column_parent && that_present_column_parent)) +- return false; +- if (!this.column_parent.equals(that.column_parent)) +- return false; +- } +- +- boolean this_present_predicate = true && this.isSetPredicate(); +- boolean that_present_predicate = true && that.isSetPredicate(); +- if (this_present_predicate || that_present_predicate) { +- if (!(this_present_predicate && that_present_predicate)) +- return false; +- if (!this.predicate.equals(that.predicate)) +- return false; +- } +- +- boolean this_present_consistency_level = true && this.isSetConsistency_level(); +- boolean that_present_consistency_level = true && that.isSetConsistency_level(); +- if (this_present_consistency_level || that_present_consistency_level) { +- if (!(this_present_consistency_level && that_present_consistency_level)) +- return false; +- if (!this.consistency_level.equals(that.consistency_level)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_keys = true && (isSetKeys()); +- builder.append(present_keys); +- if (present_keys) +- 
builder.append(keys); +- +- boolean present_column_parent = true && (isSetColumn_parent()); +- builder.append(present_column_parent); +- if (present_column_parent) +- builder.append(column_parent); +- +- boolean present_predicate = true && (isSetPredicate()); +- builder.append(present_predicate); +- if (present_predicate) +- builder.append(predicate); +- +- boolean present_consistency_level = true && (isSetConsistency_level()); +- builder.append(present_consistency_level); +- if (present_consistency_level) +- builder.append(consistency_level.getValue()); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(multiget_slice_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetKeys()).compareTo(other.isSetKeys()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKeys()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.keys, other.keys); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetColumn_parent()).compareTo(other.isSetColumn_parent()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumn_parent()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_parent, other.column_parent); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetPredicate()).compareTo(other.isSetPredicate()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetPredicate()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.predicate, other.predicate); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetConsistency_level()).compareTo(other.isSetConsistency_level()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- 
if (isSetConsistency_level()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.consistency_level, other.consistency_level); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("multiget_slice_args("); +- boolean first = true; +- +- sb.append("keys:"); +- if (this.keys == null) { +- sb.append("null"); +- } else { +- sb.append(this.keys); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("column_parent:"); +- if (this.column_parent == null) { +- sb.append("null"); +- } else { +- sb.append(this.column_parent); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("predicate:"); +- if (this.predicate == null) { +- sb.append("null"); +- } else { +- sb.append(this.predicate); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("consistency_level:"); +- if (this.consistency_level == null) { +- sb.append("null"); +- } else { +- sb.append(this.consistency_level); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (keys == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'keys' was not present! Struct: " + toString()); +- } +- if (column_parent == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'column_parent' was not present! 
Struct: " + toString()); +- } +- if (predicate == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'predicate' was not present! Struct: " + toString()); +- } +- if (consistency_level == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'consistency_level' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- if (column_parent != null) { +- column_parent.validate(); +- } +- if (predicate != null) { +- predicate.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class multiget_slice_argsStandardSchemeFactory implements SchemeFactory { +- public multiget_slice_argsStandardScheme getScheme() { +- return new multiget_slice_argsStandardScheme(); +- } +- } +- +- private static class multiget_slice_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, multiget_slice_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // KEYS +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list232 
= iprot.readListBegin(); +- struct.keys = new ArrayList(_list232.size); +- for (int _i233 = 0; _i233 < _list232.size; ++_i233) +- { +- ByteBuffer _elem234; +- _elem234 = iprot.readBinary(); +- struct.keys.add(_elem234); +- } +- iprot.readListEnd(); +- } +- struct.setKeysIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // COLUMN_PARENT +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.column_parent = new ColumnParent(); +- struct.column_parent.read(iprot); +- struct.setColumn_parentIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // PREDICATE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.predicate = new SlicePredicate(); +- struct.predicate.read(iprot); +- struct.setPredicateIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // CONSISTENCY_LEVEL +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, multiget_slice_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.keys != null) { +- oprot.writeFieldBegin(KEYS_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
struct.keys.size())); +- for (ByteBuffer _iter235 : struct.keys) +- { +- oprot.writeBinary(_iter235); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- if (struct.column_parent != null) { +- oprot.writeFieldBegin(COLUMN_PARENT_FIELD_DESC); +- struct.column_parent.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.predicate != null) { +- oprot.writeFieldBegin(PREDICATE_FIELD_DESC); +- struct.predicate.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.consistency_level != null) { +- oprot.writeFieldBegin(CONSISTENCY_LEVEL_FIELD_DESC); +- oprot.writeI32(struct.consistency_level.getValue()); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class multiget_slice_argsTupleSchemeFactory implements SchemeFactory { +- public multiget_slice_argsTupleScheme getScheme() { +- return new multiget_slice_argsTupleScheme(); +- } +- } +- +- private static class multiget_slice_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, multiget_slice_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- { +- oprot.writeI32(struct.keys.size()); +- for (ByteBuffer _iter236 : struct.keys) +- { +- oprot.writeBinary(_iter236); +- } +- } +- struct.column_parent.write(oprot); +- struct.predicate.write(oprot); +- oprot.writeI32(struct.consistency_level.getValue()); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, multiget_slice_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- { +- org.apache.thrift.protocol.TList _list237 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); +- struct.keys = new ArrayList(_list237.size); +- for (int _i238 = 0; _i238 < _list237.size; ++_i238) +- { +- ByteBuffer _elem239; +- _elem239 = iprot.readBinary(); +- 
struct.keys.add(_elem239); +- } +- } +- struct.setKeysIsSet(true); +- struct.column_parent = new ColumnParent(); +- struct.column_parent.read(iprot); +- struct.setColumn_parentIsSet(true); +- struct.predicate = new SlicePredicate(); +- struct.predicate.read(iprot); +- struct.setPredicateIsSet(true); +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } +- } +- +- } +- +- public static class multiget_slice_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("multiget_slice_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.MAP, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField UE_FIELD_DESC = new org.apache.thrift.protocol.TField("ue", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField TE_FIELD_DESC = new org.apache.thrift.protocol.TField("te", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new multiget_slice_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new multiget_slice_resultTupleSchemeFactory()); +- } +- +- public Map> success; // required +- public InvalidRequestException ire; // required +- public UnavailableException ue; // required +- public TimedOutException te; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"), +- UE((short)2, "ue"), +- TE((short)3, "te"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- case 2: // UE +- return UE; +- case 3: // TE +- return TE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true), +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnOrSuperColumn.class))))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.UE, new org.apache.thrift.meta_data.FieldMetaData("ue", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.TE, new org.apache.thrift.meta_data.FieldMetaData("te", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(multiget_slice_result.class, metaDataMap); +- } +- +- public multiget_slice_result() { +- } +- +- public multiget_slice_result( +- Map> success, +- InvalidRequestException ire, +- UnavailableException ue, +- TimedOutException te) +- { +- this(); +- this.success = success; +- this.ire = ire; +- this.ue = ue; +- this.te = te; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public multiget_slice_result(multiget_slice_result other) { +- if (other.isSetSuccess()) { +- Map> __this__success = new HashMap>(other.success.size()); +- for (Map.Entry> other_element : other.success.entrySet()) { +- +- ByteBuffer other_element_key = other_element.getKey(); +- List other_element_value = other_element.getValue(); +- +- ByteBuffer __this__success_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key); +-; +- +- List __this__success_copy_value = new ArrayList(other_element_value.size()); +- for (ColumnOrSuperColumn other_element_value_element : other_element_value) { +- __this__success_copy_value.add(new ColumnOrSuperColumn(other_element_value_element)); +- } +- +- __this__success.put(__this__success_copy_key, __this__success_copy_value); +- } +- this.success = __this__success; +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- if (other.isSetUe()) { +- this.ue = new UnavailableException(other.ue); +- } +- if (other.isSetTe()) { +- this.te = new TimedOutException(other.te); +- } +- } +- +- public multiget_slice_result deepCopy() { +- return new multiget_slice_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- this.ue = null; +- this.te = null; +- } +- +- public int getSuccessSize() { +- return (this.success == null) ? 
0 : this.success.size(); +- } +- +- public void putToSuccess(ByteBuffer key, List val) { +- if (this.success == null) { +- this.success = new HashMap>(); +- } +- this.success.put(key, val); +- } +- +- public Map> getSuccess() { +- return this.success; +- } +- +- public multiget_slice_result setSuccess(Map> success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public multiget_slice_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public UnavailableException getUe() { +- return this.ue; +- } +- +- public multiget_slice_result setUe(UnavailableException ue) { +- this.ue = ue; +- return this; +- } +- +- public void unsetUe() { +- this.ue = null; +- } +- +- /** Returns true if field ue is set (has been assigned a value) and false otherwise */ +- public boolean isSetUe() { +- return this.ue != null; +- } +- +- public void setUeIsSet(boolean value) { +- if (!value) { +- this.ue = null; +- } +- } +- +- public TimedOutException getTe() { +- return this.te; +- } +- +- public multiget_slice_result setTe(TimedOutException te) { +- this.te = te; +- return this; +- } +- +- public void unsetTe() { +- this.te = null; +- } +- +- /** Returns true if field te is set (has been assigned a value) and false otherwise */ +- public 
boolean isSetTe() { +- return this.te != null; +- } +- +- public void setTeIsSet(boolean value) { +- if (!value) { +- this.te = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((Map>)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- case UE: +- if (value == null) { +- unsetUe(); +- } else { +- setUe((UnavailableException)value); +- } +- break; +- +- case TE: +- if (value == null) { +- unsetTe(); +- } else { +- setTe((TimedOutException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- case UE: +- return getUe(); +- +- case TE: +- return getTe(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- case UE: +- return isSetUe(); +- case TE: +- return isSetTe(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof multiget_slice_result) +- return this.equals((multiget_slice_result)that); +- return false; +- } +- +- public boolean equals(multiget_slice_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if 
(!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- boolean this_present_ue = true && this.isSetUe(); +- boolean that_present_ue = true && that.isSetUe(); +- if (this_present_ue || that_present_ue) { +- if (!(this_present_ue && that_present_ue)) +- return false; +- if (!this.ue.equals(that.ue)) +- return false; +- } +- +- boolean this_present_te = true && this.isSetTe(); +- boolean that_present_te = true && that.isSetTe(); +- if (this_present_te || that_present_te) { +- if (!(this_present_te && that_present_te)) +- return false; +- if (!this.te.equals(that.te)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- boolean present_ue = true && (isSetUe()); +- builder.append(present_ue); +- if (present_ue) +- builder.append(ue); +- +- boolean present_te = true && (isSetTe()); +- builder.append(present_te); +- if (present_te) +- builder.append(te); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(multiget_slice_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetUe()).compareTo(other.isSetUe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetUe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ue, other.ue); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetTe()).compareTo(other.isSetTe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetTe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.te, other.te); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("multiget_slice_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- if (!first) sb.append(", "); +- 
sb.append("ue:"); +- if (this.ue == null) { +- sb.append("null"); +- } else { +- sb.append(this.ue); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("te:"); +- if (this.te == null) { +- sb.append("null"); +- } else { +- sb.append(this.te); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class multiget_slice_resultStandardSchemeFactory implements SchemeFactory { +- public multiget_slice_resultStandardScheme getScheme() { +- return new multiget_slice_resultStandardScheme(); +- } +- } +- +- private static class multiget_slice_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, multiget_slice_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { +- { +- org.apache.thrift.protocol.TMap _map240 = iprot.readMapBegin(); +- struct.success = new 
HashMap>(2*_map240.size); +- for (int _i241 = 0; _i241 < _map240.size; ++_i241) +- { +- ByteBuffer _key242; +- List _val243; +- _key242 = iprot.readBinary(); +- { +- org.apache.thrift.protocol.TList _list244 = iprot.readListBegin(); +- _val243 = new ArrayList(_list244.size); +- for (int _i245 = 0; _i245 < _list244.size; ++_i245) +- { +- ColumnOrSuperColumn _elem246; +- _elem246 = new ColumnOrSuperColumn(); +- _elem246.read(iprot); +- _val243.add(_elem246); +- } +- iprot.readListEnd(); +- } +- struct.success.put(_key242, _val243); +- } +- iprot.readMapEnd(); +- } +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // UE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // TE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, multiget_slice_result struct) throws org.apache.thrift.TException { +- struct.validate(); 
+- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- { +- oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, struct.success.size())); +- for (Map.Entry> _iter247 : struct.success.entrySet()) +- { +- oprot.writeBinary(_iter247.getKey()); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter247.getValue().size())); +- for (ColumnOrSuperColumn _iter248 : _iter247.getValue()) +- { +- _iter248.write(oprot); +- } +- oprot.writeListEnd(); +- } +- } +- oprot.writeMapEnd(); +- } +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ue != null) { +- oprot.writeFieldBegin(UE_FIELD_DESC); +- struct.ue.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.te != null) { +- oprot.writeFieldBegin(TE_FIELD_DESC); +- struct.te.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class multiget_slice_resultTupleSchemeFactory implements SchemeFactory { +- public multiget_slice_resultTupleScheme getScheme() { +- return new multiget_slice_resultTupleScheme(); +- } +- } +- +- private static class multiget_slice_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, multiget_slice_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetIre()) { +- optionals.set(1); +- } +- if (struct.isSetUe()) { +- optionals.set(2); +- } +- if (struct.isSetTe()) { +- optionals.set(3); +- } +- oprot.writeBitSet(optionals, 4); +- if (struct.isSetSuccess()) { +- { +- 
oprot.writeI32(struct.success.size()); +- for (Map.Entry> _iter249 : struct.success.entrySet()) +- { +- oprot.writeBinary(_iter249.getKey()); +- { +- oprot.writeI32(_iter249.getValue().size()); +- for (ColumnOrSuperColumn _iter250 : _iter249.getValue()) +- { +- _iter250.write(oprot); +- } +- } +- } +- } +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- if (struct.isSetUe()) { +- struct.ue.write(oprot); +- } +- if (struct.isSetTe()) { +- struct.te.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, multiget_slice_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(4); +- if (incoming.get(0)) { +- { +- org.apache.thrift.protocol.TMap _map251 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32()); +- struct.success = new HashMap>(2*_map251.size); +- for (int _i252 = 0; _i252 < _map251.size; ++_i252) +- { +- ByteBuffer _key253; +- List _val254; +- _key253 = iprot.readBinary(); +- { +- org.apache.thrift.protocol.TList _list255 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); +- _val254 = new ArrayList(_list255.size); +- for (int _i256 = 0; _i256 < _list255.size; ++_i256) +- { +- ColumnOrSuperColumn _elem257; +- _elem257 = new ColumnOrSuperColumn(); +- _elem257.read(iprot); +- _val254.add(_elem257); +- } +- } +- struct.success.put(_key253, _val254); +- } +- } +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- if (incoming.get(2)) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } +- if (incoming.get(3)) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } +- } +- } +- 
+- } +- +- public static class multiget_count_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("multiget_count_args"); +- +- private static final org.apache.thrift.protocol.TField KEYS_FIELD_DESC = new org.apache.thrift.protocol.TField("keys", org.apache.thrift.protocol.TType.LIST, (short)1); +- private static final org.apache.thrift.protocol.TField COLUMN_PARENT_FIELD_DESC = new org.apache.thrift.protocol.TField("column_parent", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField PREDICATE_FIELD_DESC = new org.apache.thrift.protocol.TField("predicate", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- private static final org.apache.thrift.protocol.TField CONSISTENCY_LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("consistency_level", org.apache.thrift.protocol.TType.I32, (short)4); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new multiget_count_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new multiget_count_argsTupleSchemeFactory()); +- } +- +- public List keys; // required +- public ColumnParent column_parent; // required +- public SlicePredicate predicate; // required +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel consistency_level; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- KEYS((short)1, "keys"), +- COLUMN_PARENT((short)2, "column_parent"), +- PREDICATE((short)3, "predicate"), +- /** +- * +- * @see ConsistencyLevel +- */ +- CONSISTENCY_LEVEL((short)4, "consistency_level"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // KEYS +- return KEYS; +- case 2: // COLUMN_PARENT +- return COLUMN_PARENT; +- case 3: // PREDICATE +- return PREDICATE; +- case 4: // CONSISTENCY_LEVEL +- return CONSISTENCY_LEVEL; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.KEYS, new org.apache.thrift.meta_data.FieldMetaData("keys", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true)))); +- tmpMap.put(_Fields.COLUMN_PARENT, new org.apache.thrift.meta_data.FieldMetaData("column_parent", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnParent.class))); +- tmpMap.put(_Fields.PREDICATE, new org.apache.thrift.meta_data.FieldMetaData("predicate", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SlicePredicate.class))); +- tmpMap.put(_Fields.CONSISTENCY_LEVEL, new org.apache.thrift.meta_data.FieldMetaData("consistency_level", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ConsistencyLevel.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(multiget_count_args.class, metaDataMap); +- } +- +- public 
multiget_count_args() { +- this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- public multiget_count_args( +- List keys, +- ColumnParent column_parent, +- SlicePredicate predicate, +- ConsistencyLevel consistency_level) +- { +- this(); +- this.keys = keys; +- this.column_parent = column_parent; +- this.predicate = predicate; +- this.consistency_level = consistency_level; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public multiget_count_args(multiget_count_args other) { +- if (other.isSetKeys()) { +- List __this__keys = new ArrayList(other.keys); +- this.keys = __this__keys; +- } +- if (other.isSetColumn_parent()) { +- this.column_parent = new ColumnParent(other.column_parent); +- } +- if (other.isSetPredicate()) { +- this.predicate = new SlicePredicate(other.predicate); +- } +- if (other.isSetConsistency_level()) { +- this.consistency_level = other.consistency_level; +- } +- } +- +- public multiget_count_args deepCopy() { +- return new multiget_count_args(this); +- } +- +- @Override +- public void clear() { +- this.keys = null; +- this.column_parent = null; +- this.predicate = null; +- this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- public int getKeysSize() { +- return (this.keys == null) ? 0 : this.keys.size(); +- } +- +- public java.util.Iterator getKeysIterator() { +- return (this.keys == null) ? 
null : this.keys.iterator(); +- } +- +- public void addToKeys(ByteBuffer elem) { +- if (this.keys == null) { +- this.keys = new ArrayList(); +- } +- this.keys.add(elem); +- } +- +- public List getKeys() { +- return this.keys; +- } +- +- public multiget_count_args setKeys(List keys) { +- this.keys = keys; +- return this; +- } +- +- public void unsetKeys() { +- this.keys = null; +- } +- +- /** Returns true if field keys is set (has been assigned a value) and false otherwise */ +- public boolean isSetKeys() { +- return this.keys != null; +- } +- +- public void setKeysIsSet(boolean value) { +- if (!value) { +- this.keys = null; +- } +- } +- +- public ColumnParent getColumn_parent() { +- return this.column_parent; +- } +- +- public multiget_count_args setColumn_parent(ColumnParent column_parent) { +- this.column_parent = column_parent; +- return this; +- } +- +- public void unsetColumn_parent() { +- this.column_parent = null; +- } +- +- /** Returns true if field column_parent is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumn_parent() { +- return this.column_parent != null; +- } +- +- public void setColumn_parentIsSet(boolean value) { +- if (!value) { +- this.column_parent = null; +- } +- } +- +- public SlicePredicate getPredicate() { +- return this.predicate; +- } +- +- public multiget_count_args setPredicate(SlicePredicate predicate) { +- this.predicate = predicate; +- return this; +- } +- +- public void unsetPredicate() { +- this.predicate = null; +- } +- +- /** Returns true if field predicate is set (has been assigned a value) and false otherwise */ +- public boolean isSetPredicate() { +- return this.predicate != null; +- } +- +- public void setPredicateIsSet(boolean value) { +- if (!value) { +- this.predicate = null; +- } +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel getConsistency_level() { +- return this.consistency_level; +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public 
multiget_count_args setConsistency_level(ConsistencyLevel consistency_level) { +- this.consistency_level = consistency_level; +- return this; +- } +- +- public void unsetConsistency_level() { +- this.consistency_level = null; +- } +- +- /** Returns true if field consistency_level is set (has been assigned a value) and false otherwise */ +- public boolean isSetConsistency_level() { +- return this.consistency_level != null; +- } +- +- public void setConsistency_levelIsSet(boolean value) { +- if (!value) { +- this.consistency_level = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case KEYS: +- if (value == null) { +- unsetKeys(); +- } else { +- setKeys((List)value); +- } +- break; +- +- case COLUMN_PARENT: +- if (value == null) { +- unsetColumn_parent(); +- } else { +- setColumn_parent((ColumnParent)value); +- } +- break; +- +- case PREDICATE: +- if (value == null) { +- unsetPredicate(); +- } else { +- setPredicate((SlicePredicate)value); +- } +- break; +- +- case CONSISTENCY_LEVEL: +- if (value == null) { +- unsetConsistency_level(); +- } else { +- setConsistency_level((ConsistencyLevel)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case KEYS: +- return getKeys(); +- +- case COLUMN_PARENT: +- return getColumn_parent(); +- +- case PREDICATE: +- return getPredicate(); +- +- case CONSISTENCY_LEVEL: +- return getConsistency_level(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case KEYS: +- return isSetKeys(); +- case COLUMN_PARENT: +- return isSetColumn_parent(); +- case PREDICATE: +- return isSetPredicate(); +- case CONSISTENCY_LEVEL: +- return isSetConsistency_level(); +- } +- throw new 
IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof multiget_count_args) +- return this.equals((multiget_count_args)that); +- return false; +- } +- +- public boolean equals(multiget_count_args that) { +- if (that == null) +- return false; +- +- boolean this_present_keys = true && this.isSetKeys(); +- boolean that_present_keys = true && that.isSetKeys(); +- if (this_present_keys || that_present_keys) { +- if (!(this_present_keys && that_present_keys)) +- return false; +- if (!this.keys.equals(that.keys)) +- return false; +- } +- +- boolean this_present_column_parent = true && this.isSetColumn_parent(); +- boolean that_present_column_parent = true && that.isSetColumn_parent(); +- if (this_present_column_parent || that_present_column_parent) { +- if (!(this_present_column_parent && that_present_column_parent)) +- return false; +- if (!this.column_parent.equals(that.column_parent)) +- return false; +- } +- +- boolean this_present_predicate = true && this.isSetPredicate(); +- boolean that_present_predicate = true && that.isSetPredicate(); +- if (this_present_predicate || that_present_predicate) { +- if (!(this_present_predicate && that_present_predicate)) +- return false; +- if (!this.predicate.equals(that.predicate)) +- return false; +- } +- +- boolean this_present_consistency_level = true && this.isSetConsistency_level(); +- boolean that_present_consistency_level = true && that.isSetConsistency_level(); +- if (this_present_consistency_level || that_present_consistency_level) { +- if (!(this_present_consistency_level && that_present_consistency_level)) +- return false; +- if (!this.consistency_level.equals(that.consistency_level)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_keys = true && (isSetKeys()); +- builder.append(present_keys); +- if (present_keys) +- 
builder.append(keys); +- +- boolean present_column_parent = true && (isSetColumn_parent()); +- builder.append(present_column_parent); +- if (present_column_parent) +- builder.append(column_parent); +- +- boolean present_predicate = true && (isSetPredicate()); +- builder.append(present_predicate); +- if (present_predicate) +- builder.append(predicate); +- +- boolean present_consistency_level = true && (isSetConsistency_level()); +- builder.append(present_consistency_level); +- if (present_consistency_level) +- builder.append(consistency_level.getValue()); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(multiget_count_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetKeys()).compareTo(other.isSetKeys()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKeys()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.keys, other.keys); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetColumn_parent()).compareTo(other.isSetColumn_parent()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumn_parent()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_parent, other.column_parent); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetPredicate()).compareTo(other.isSetPredicate()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetPredicate()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.predicate, other.predicate); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetConsistency_level()).compareTo(other.isSetConsistency_level()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- 
if (isSetConsistency_level()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.consistency_level, other.consistency_level); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("multiget_count_args("); +- boolean first = true; +- +- sb.append("keys:"); +- if (this.keys == null) { +- sb.append("null"); +- } else { +- sb.append(this.keys); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("column_parent:"); +- if (this.column_parent == null) { +- sb.append("null"); +- } else { +- sb.append(this.column_parent); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("predicate:"); +- if (this.predicate == null) { +- sb.append("null"); +- } else { +- sb.append(this.predicate); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("consistency_level:"); +- if (this.consistency_level == null) { +- sb.append("null"); +- } else { +- sb.append(this.consistency_level); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (keys == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'keys' was not present! Struct: " + toString()); +- } +- if (column_parent == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'column_parent' was not present! 
Struct: " + toString()); +- } +- if (predicate == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'predicate' was not present! Struct: " + toString()); +- } +- if (consistency_level == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'consistency_level' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- if (column_parent != null) { +- column_parent.validate(); +- } +- if (predicate != null) { +- predicate.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class multiget_count_argsStandardSchemeFactory implements SchemeFactory { +- public multiget_count_argsStandardScheme getScheme() { +- return new multiget_count_argsStandardScheme(); +- } +- } +- +- private static class multiget_count_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, multiget_count_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // KEYS +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list258 
= iprot.readListBegin(); +- struct.keys = new ArrayList(_list258.size); +- for (int _i259 = 0; _i259 < _list258.size; ++_i259) +- { +- ByteBuffer _elem260; +- _elem260 = iprot.readBinary(); +- struct.keys.add(_elem260); +- } +- iprot.readListEnd(); +- } +- struct.setKeysIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // COLUMN_PARENT +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.column_parent = new ColumnParent(); +- struct.column_parent.read(iprot); +- struct.setColumn_parentIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // PREDICATE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.predicate = new SlicePredicate(); +- struct.predicate.read(iprot); +- struct.setPredicateIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // CONSISTENCY_LEVEL +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, multiget_count_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.keys != null) { +- oprot.writeFieldBegin(KEYS_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
struct.keys.size())); +- for (ByteBuffer _iter261 : struct.keys) +- { +- oprot.writeBinary(_iter261); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- if (struct.column_parent != null) { +- oprot.writeFieldBegin(COLUMN_PARENT_FIELD_DESC); +- struct.column_parent.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.predicate != null) { +- oprot.writeFieldBegin(PREDICATE_FIELD_DESC); +- struct.predicate.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.consistency_level != null) { +- oprot.writeFieldBegin(CONSISTENCY_LEVEL_FIELD_DESC); +- oprot.writeI32(struct.consistency_level.getValue()); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class multiget_count_argsTupleSchemeFactory implements SchemeFactory { +- public multiget_count_argsTupleScheme getScheme() { +- return new multiget_count_argsTupleScheme(); +- } +- } +- +- private static class multiget_count_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, multiget_count_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- { +- oprot.writeI32(struct.keys.size()); +- for (ByteBuffer _iter262 : struct.keys) +- { +- oprot.writeBinary(_iter262); +- } +- } +- struct.column_parent.write(oprot); +- struct.predicate.write(oprot); +- oprot.writeI32(struct.consistency_level.getValue()); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, multiget_count_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- { +- org.apache.thrift.protocol.TList _list263 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); +- struct.keys = new ArrayList(_list263.size); +- for (int _i264 = 0; _i264 < _list263.size; ++_i264) +- { +- ByteBuffer _elem265; +- _elem265 = iprot.readBinary(); +- 
struct.keys.add(_elem265); +- } +- } +- struct.setKeysIsSet(true); +- struct.column_parent = new ColumnParent(); +- struct.column_parent.read(iprot); +- struct.setColumn_parentIsSet(true); +- struct.predicate = new SlicePredicate(); +- struct.predicate.read(iprot); +- struct.setPredicateIsSet(true); +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } +- } +- +- } +- +- public static class multiget_count_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("multiget_count_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.MAP, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField UE_FIELD_DESC = new org.apache.thrift.protocol.TField("ue", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField TE_FIELD_DESC = new org.apache.thrift.protocol.TField("te", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new multiget_count_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new multiget_count_resultTupleSchemeFactory()); +- } +- +- public Map success; // required +- public InvalidRequestException ire; // required +- public UnavailableException ue; // required +- public TimedOutException te; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"), +- UE((short)2, "ue"), +- TE((short)3, "te"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- case 2: // UE +- return UE; +- case 3: // TE +- return TE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true), +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.UE, new org.apache.thrift.meta_data.FieldMetaData("ue", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.TE, new org.apache.thrift.meta_data.FieldMetaData("te", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(multiget_count_result.class, metaDataMap); +- } +- +- public multiget_count_result() { +- } +- 
+- public multiget_count_result( +- Map success, +- InvalidRequestException ire, +- UnavailableException ue, +- TimedOutException te) +- { +- this(); +- this.success = success; +- this.ire = ire; +- this.ue = ue; +- this.te = te; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public multiget_count_result(multiget_count_result other) { +- if (other.isSetSuccess()) { +- Map __this__success = new HashMap(other.success); +- this.success = __this__success; +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- if (other.isSetUe()) { +- this.ue = new UnavailableException(other.ue); +- } +- if (other.isSetTe()) { +- this.te = new TimedOutException(other.te); +- } +- } +- +- public multiget_count_result deepCopy() { +- return new multiget_count_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- this.ue = null; +- this.te = null; +- } +- +- public int getSuccessSize() { +- return (this.success == null) ? 
0 : this.success.size(); +- } +- +- public void putToSuccess(ByteBuffer key, int val) { +- if (this.success == null) { +- this.success = new HashMap(); +- } +- this.success.put(key, val); +- } +- +- public Map getSuccess() { +- return this.success; +- } +- +- public multiget_count_result setSuccess(Map success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public multiget_count_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public UnavailableException getUe() { +- return this.ue; +- } +- +- public multiget_count_result setUe(UnavailableException ue) { +- this.ue = ue; +- return this; +- } +- +- public void unsetUe() { +- this.ue = null; +- } +- +- /** Returns true if field ue is set (has been assigned a value) and false otherwise */ +- public boolean isSetUe() { +- return this.ue != null; +- } +- +- public void setUeIsSet(boolean value) { +- if (!value) { +- this.ue = null; +- } +- } +- +- public TimedOutException getTe() { +- return this.te; +- } +- +- public multiget_count_result setTe(TimedOutException te) { +- this.te = te; +- return this; +- } +- +- public void unsetTe() { +- this.te = null; +- } +- +- /** Returns true if field te is set (has been assigned a value) and false otherwise */ +- public 
boolean isSetTe() { +- return this.te != null; +- } +- +- public void setTeIsSet(boolean value) { +- if (!value) { +- this.te = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((Map)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- case UE: +- if (value == null) { +- unsetUe(); +- } else { +- setUe((UnavailableException)value); +- } +- break; +- +- case TE: +- if (value == null) { +- unsetTe(); +- } else { +- setTe((TimedOutException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- case UE: +- return getUe(); +- +- case TE: +- return getTe(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- case UE: +- return isSetUe(); +- case TE: +- return isSetTe(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof multiget_count_result) +- return this.equals((multiget_count_result)that); +- return false; +- } +- +- public boolean equals(multiget_count_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if 
(!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- boolean this_present_ue = true && this.isSetUe(); +- boolean that_present_ue = true && that.isSetUe(); +- if (this_present_ue || that_present_ue) { +- if (!(this_present_ue && that_present_ue)) +- return false; +- if (!this.ue.equals(that.ue)) +- return false; +- } +- +- boolean this_present_te = true && this.isSetTe(); +- boolean that_present_te = true && that.isSetTe(); +- if (this_present_te || that_present_te) { +- if (!(this_present_te && that_present_te)) +- return false; +- if (!this.te.equals(that.te)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- boolean present_ue = true && (isSetUe()); +- builder.append(present_ue); +- if (present_ue) +- builder.append(ue); +- +- boolean present_te = true && (isSetTe()); +- builder.append(present_te); +- if (present_te) +- builder.append(te); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(multiget_count_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetUe()).compareTo(other.isSetUe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetUe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ue, other.ue); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetTe()).compareTo(other.isSetTe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetTe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.te, other.te); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("multiget_count_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- if (!first) sb.append(", "); +- 
sb.append("ue:"); +- if (this.ue == null) { +- sb.append("null"); +- } else { +- sb.append(this.ue); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("te:"); +- if (this.te == null) { +- sb.append("null"); +- } else { +- sb.append(this.te); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class multiget_count_resultStandardSchemeFactory implements SchemeFactory { +- public multiget_count_resultStandardScheme getScheme() { +- return new multiget_count_resultStandardScheme(); +- } +- } +- +- private static class multiget_count_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, multiget_count_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { +- { +- org.apache.thrift.protocol.TMap _map266 = iprot.readMapBegin(); +- struct.success = new 
HashMap(2*_map266.size); +- for (int _i267 = 0; _i267 < _map266.size; ++_i267) +- { +- ByteBuffer _key268; +- int _val269; +- _key268 = iprot.readBinary(); +- _val269 = iprot.readI32(); +- struct.success.put(_key268, _val269); +- } +- iprot.readMapEnd(); +- } +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // UE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // TE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, multiget_count_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- { +- oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, struct.success.size())); +- for (Map.Entry 
_iter270 : struct.success.entrySet()) +- { +- oprot.writeBinary(_iter270.getKey()); +- oprot.writeI32(_iter270.getValue()); +- } +- oprot.writeMapEnd(); +- } +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ue != null) { +- oprot.writeFieldBegin(UE_FIELD_DESC); +- struct.ue.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.te != null) { +- oprot.writeFieldBegin(TE_FIELD_DESC); +- struct.te.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class multiget_count_resultTupleSchemeFactory implements SchemeFactory { +- public multiget_count_resultTupleScheme getScheme() { +- return new multiget_count_resultTupleScheme(); +- } +- } +- +- private static class multiget_count_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, multiget_count_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetIre()) { +- optionals.set(1); +- } +- if (struct.isSetUe()) { +- optionals.set(2); +- } +- if (struct.isSetTe()) { +- optionals.set(3); +- } +- oprot.writeBitSet(optionals, 4); +- if (struct.isSetSuccess()) { +- { +- oprot.writeI32(struct.success.size()); +- for (Map.Entry _iter271 : struct.success.entrySet()) +- { +- oprot.writeBinary(_iter271.getKey()); +- oprot.writeI32(_iter271.getValue()); +- } +- } +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- if (struct.isSetUe()) { +- struct.ue.write(oprot); +- } +- if (struct.isSetTe()) { +- struct.te.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, multiget_count_result struct) throws org.apache.thrift.TException { +- 
TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(4); +- if (incoming.get(0)) { +- { +- org.apache.thrift.protocol.TMap _map272 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, iprot.readI32()); +- struct.success = new HashMap(2*_map272.size); +- for (int _i273 = 0; _i273 < _map272.size; ++_i273) +- { +- ByteBuffer _key274; +- int _val275; +- _key274 = iprot.readBinary(); +- _val275 = iprot.readI32(); +- struct.success.put(_key274, _val275); +- } +- } +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- if (incoming.get(2)) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } +- if (incoming.get(3)) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } +- } +- } +- +- } +- +- public static class get_range_slices_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_range_slices_args"); +- +- private static final org.apache.thrift.protocol.TField COLUMN_PARENT_FIELD_DESC = new org.apache.thrift.protocol.TField("column_parent", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField PREDICATE_FIELD_DESC = new org.apache.thrift.protocol.TField("predicate", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField RANGE_FIELD_DESC = new org.apache.thrift.protocol.TField("range", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- private static final org.apache.thrift.protocol.TField CONSISTENCY_LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("consistency_level", 
org.apache.thrift.protocol.TType.I32, (short)4); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new get_range_slices_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new get_range_slices_argsTupleSchemeFactory()); +- } +- +- public ColumnParent column_parent; // required +- public SlicePredicate predicate; // required +- public KeyRange range; // required +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel consistency_level; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- COLUMN_PARENT((short)1, "column_parent"), +- PREDICATE((short)2, "predicate"), +- RANGE((short)3, "range"), +- /** +- * +- * @see ConsistencyLevel +- */ +- CONSISTENCY_LEVEL((short)4, "consistency_level"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // COLUMN_PARENT +- return COLUMN_PARENT; +- case 2: // PREDICATE +- return PREDICATE; +- case 3: // RANGE +- return RANGE; +- case 4: // CONSISTENCY_LEVEL +- return CONSISTENCY_LEVEL; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.COLUMN_PARENT, new org.apache.thrift.meta_data.FieldMetaData("column_parent", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnParent.class))); +- tmpMap.put(_Fields.PREDICATE, new org.apache.thrift.meta_data.FieldMetaData("predicate", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SlicePredicate.class))); +- tmpMap.put(_Fields.RANGE, new org.apache.thrift.meta_data.FieldMetaData("range", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, KeyRange.class))); +- tmpMap.put(_Fields.CONSISTENCY_LEVEL, new org.apache.thrift.meta_data.FieldMetaData("consistency_level", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ConsistencyLevel.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_range_slices_args.class, metaDataMap); +- } +- +- public get_range_slices_args() { +- this.consistency_level = 
org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- public get_range_slices_args( +- ColumnParent column_parent, +- SlicePredicate predicate, +- KeyRange range, +- ConsistencyLevel consistency_level) +- { +- this(); +- this.column_parent = column_parent; +- this.predicate = predicate; +- this.range = range; +- this.consistency_level = consistency_level; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public get_range_slices_args(get_range_slices_args other) { +- if (other.isSetColumn_parent()) { +- this.column_parent = new ColumnParent(other.column_parent); +- } +- if (other.isSetPredicate()) { +- this.predicate = new SlicePredicate(other.predicate); +- } +- if (other.isSetRange()) { +- this.range = new KeyRange(other.range); +- } +- if (other.isSetConsistency_level()) { +- this.consistency_level = other.consistency_level; +- } +- } +- +- public get_range_slices_args deepCopy() { +- return new get_range_slices_args(this); +- } +- +- @Override +- public void clear() { +- this.column_parent = null; +- this.predicate = null; +- this.range = null; +- this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- public ColumnParent getColumn_parent() { +- return this.column_parent; +- } +- +- public get_range_slices_args setColumn_parent(ColumnParent column_parent) { +- this.column_parent = column_parent; +- return this; +- } +- +- public void unsetColumn_parent() { +- this.column_parent = null; +- } +- +- /** Returns true if field column_parent is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumn_parent() { +- return this.column_parent != null; +- } +- +- public void setColumn_parentIsSet(boolean value) { +- if (!value) { +- this.column_parent = null; +- } +- } +- +- public SlicePredicate getPredicate() { +- return this.predicate; +- } +- +- public get_range_slices_args setPredicate(SlicePredicate predicate) { +- this.predicate = predicate; +- return this; +- } +- +- public void 
unsetPredicate() { +- this.predicate = null; +- } +- +- /** Returns true if field predicate is set (has been assigned a value) and false otherwise */ +- public boolean isSetPredicate() { +- return this.predicate != null; +- } +- +- public void setPredicateIsSet(boolean value) { +- if (!value) { +- this.predicate = null; +- } +- } +- +- public KeyRange getRange() { +- return this.range; +- } +- +- public get_range_slices_args setRange(KeyRange range) { +- this.range = range; +- return this; +- } +- +- public void unsetRange() { +- this.range = null; +- } +- +- /** Returns true if field range is set (has been assigned a value) and false otherwise */ +- public boolean isSetRange() { +- return this.range != null; +- } +- +- public void setRangeIsSet(boolean value) { +- if (!value) { +- this.range = null; +- } +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel getConsistency_level() { +- return this.consistency_level; +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public get_range_slices_args setConsistency_level(ConsistencyLevel consistency_level) { +- this.consistency_level = consistency_level; +- return this; +- } +- +- public void unsetConsistency_level() { +- this.consistency_level = null; +- } +- +- /** Returns true if field consistency_level is set (has been assigned a value) and false otherwise */ +- public boolean isSetConsistency_level() { +- return this.consistency_level != null; +- } +- +- public void setConsistency_levelIsSet(boolean value) { +- if (!value) { +- this.consistency_level = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case COLUMN_PARENT: +- if (value == null) { +- unsetColumn_parent(); +- } else { +- setColumn_parent((ColumnParent)value); +- } +- break; +- +- case PREDICATE: +- if (value == null) { +- unsetPredicate(); +- } else { +- setPredicate((SlicePredicate)value); +- } +- break; +- +- case RANGE: +- if (value == null) { +- unsetRange(); +- } 
else { +- setRange((KeyRange)value); +- } +- break; +- +- case CONSISTENCY_LEVEL: +- if (value == null) { +- unsetConsistency_level(); +- } else { +- setConsistency_level((ConsistencyLevel)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case COLUMN_PARENT: +- return getColumn_parent(); +- +- case PREDICATE: +- return getPredicate(); +- +- case RANGE: +- return getRange(); +- +- case CONSISTENCY_LEVEL: +- return getConsistency_level(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case COLUMN_PARENT: +- return isSetColumn_parent(); +- case PREDICATE: +- return isSetPredicate(); +- case RANGE: +- return isSetRange(); +- case CONSISTENCY_LEVEL: +- return isSetConsistency_level(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof get_range_slices_args) +- return this.equals((get_range_slices_args)that); +- return false; +- } +- +- public boolean equals(get_range_slices_args that) { +- if (that == null) +- return false; +- +- boolean this_present_column_parent = true && this.isSetColumn_parent(); +- boolean that_present_column_parent = true && that.isSetColumn_parent(); +- if (this_present_column_parent || that_present_column_parent) { +- if (!(this_present_column_parent && that_present_column_parent)) +- return false; +- if (!this.column_parent.equals(that.column_parent)) +- return false; +- } +- +- boolean this_present_predicate = true && this.isSetPredicate(); +- boolean that_present_predicate = true && that.isSetPredicate(); +- if (this_present_predicate || that_present_predicate) { +- if (!(this_present_predicate && 
that_present_predicate)) +- return false; +- if (!this.predicate.equals(that.predicate)) +- return false; +- } +- +- boolean this_present_range = true && this.isSetRange(); +- boolean that_present_range = true && that.isSetRange(); +- if (this_present_range || that_present_range) { +- if (!(this_present_range && that_present_range)) +- return false; +- if (!this.range.equals(that.range)) +- return false; +- } +- +- boolean this_present_consistency_level = true && this.isSetConsistency_level(); +- boolean that_present_consistency_level = true && that.isSetConsistency_level(); +- if (this_present_consistency_level || that_present_consistency_level) { +- if (!(this_present_consistency_level && that_present_consistency_level)) +- return false; +- if (!this.consistency_level.equals(that.consistency_level)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_column_parent = true && (isSetColumn_parent()); +- builder.append(present_column_parent); +- if (present_column_parent) +- builder.append(column_parent); +- +- boolean present_predicate = true && (isSetPredicate()); +- builder.append(present_predicate); +- if (present_predicate) +- builder.append(predicate); +- +- boolean present_range = true && (isSetRange()); +- builder.append(present_range); +- if (present_range) +- builder.append(range); +- +- boolean present_consistency_level = true && (isSetConsistency_level()); +- builder.append(present_consistency_level); +- if (present_consistency_level) +- builder.append(consistency_level.getValue()); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(get_range_slices_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetColumn_parent()).compareTo(other.isSetColumn_parent()); +- if 
(lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumn_parent()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_parent, other.column_parent); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetPredicate()).compareTo(other.isSetPredicate()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetPredicate()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.predicate, other.predicate); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetRange()).compareTo(other.isSetRange()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetRange()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.range, other.range); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetConsistency_level()).compareTo(other.isSetConsistency_level()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetConsistency_level()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.consistency_level, other.consistency_level); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("get_range_slices_args("); +- boolean first = true; +- +- sb.append("column_parent:"); +- if (this.column_parent == null) { +- sb.append("null"); +- } else { +- 
sb.append(this.column_parent); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("predicate:"); +- if (this.predicate == null) { +- sb.append("null"); +- } else { +- sb.append(this.predicate); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("range:"); +- if (this.range == null) { +- sb.append("null"); +- } else { +- sb.append(this.range); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("consistency_level:"); +- if (this.consistency_level == null) { +- sb.append("null"); +- } else { +- sb.append(this.consistency_level); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (column_parent == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'column_parent' was not present! Struct: " + toString()); +- } +- if (predicate == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'predicate' was not present! Struct: " + toString()); +- } +- if (range == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'range' was not present! Struct: " + toString()); +- } +- if (consistency_level == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'consistency_level' was not present! 
Struct: " + toString()); +- } +- // check for sub-struct validity +- if (column_parent != null) { +- column_parent.validate(); +- } +- if (predicate != null) { +- predicate.validate(); +- } +- if (range != null) { +- range.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class get_range_slices_argsStandardSchemeFactory implements SchemeFactory { +- public get_range_slices_argsStandardScheme getScheme() { +- return new get_range_slices_argsStandardScheme(); +- } +- } +- +- private static class get_range_slices_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, get_range_slices_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // COLUMN_PARENT +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.column_parent = new ColumnParent(); +- struct.column_parent.read(iprot); +- struct.setColumn_parentIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // PREDICATE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { 
+- struct.predicate = new SlicePredicate(); +- struct.predicate.read(iprot); +- struct.setPredicateIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // RANGE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.range = new KeyRange(); +- struct.range.read(iprot); +- struct.setRangeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // CONSISTENCY_LEVEL +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, get_range_slices_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.column_parent != null) { +- oprot.writeFieldBegin(COLUMN_PARENT_FIELD_DESC); +- struct.column_parent.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.predicate != null) { +- oprot.writeFieldBegin(PREDICATE_FIELD_DESC); +- struct.predicate.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.range != null) { +- oprot.writeFieldBegin(RANGE_FIELD_DESC); +- struct.range.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.consistency_level != null) { +- oprot.writeFieldBegin(CONSISTENCY_LEVEL_FIELD_DESC); +- oprot.writeI32(struct.consistency_level.getValue()); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- 
private static class get_range_slices_argsTupleSchemeFactory implements SchemeFactory { +- public get_range_slices_argsTupleScheme getScheme() { +- return new get_range_slices_argsTupleScheme(); +- } +- } +- +- private static class get_range_slices_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, get_range_slices_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- struct.column_parent.write(oprot); +- struct.predicate.write(oprot); +- struct.range.write(oprot); +- oprot.writeI32(struct.consistency_level.getValue()); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, get_range_slices_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.column_parent = new ColumnParent(); +- struct.column_parent.read(iprot); +- struct.setColumn_parentIsSet(true); +- struct.predicate = new SlicePredicate(); +- struct.predicate.read(iprot); +- struct.setPredicateIsSet(true); +- struct.range = new KeyRange(); +- struct.range.read(iprot); +- struct.setRangeIsSet(true); +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } +- } +- +- } +- +- public static class get_range_slices_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_range_slices_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField 
UE_FIELD_DESC = new org.apache.thrift.protocol.TField("ue", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField TE_FIELD_DESC = new org.apache.thrift.protocol.TField("te", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new get_range_slices_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new get_range_slices_resultTupleSchemeFactory()); +- } +- +- public List success; // required +- public InvalidRequestException ire; // required +- public UnavailableException ue; // required +- public TimedOutException te; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"), +- UE((short)2, "ue"), +- TE((short)3, "te"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- case 2: // UE +- return UE; +- case 3: // TE +- return TE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, KeySlice.class)))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.UE, new org.apache.thrift.meta_data.FieldMetaData("ue", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.TE, new org.apache.thrift.meta_data.FieldMetaData("te", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_range_slices_result.class, metaDataMap); +- } +- +- public get_range_slices_result() { +- } +- +- public get_range_slices_result( +- List success, +- InvalidRequestException 
ire, +- UnavailableException ue, +- TimedOutException te) +- { +- this(); +- this.success = success; +- this.ire = ire; +- this.ue = ue; +- this.te = te; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public get_range_slices_result(get_range_slices_result other) { +- if (other.isSetSuccess()) { +- List __this__success = new ArrayList(other.success.size()); +- for (KeySlice other_element : other.success) { +- __this__success.add(new KeySlice(other_element)); +- } +- this.success = __this__success; +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- if (other.isSetUe()) { +- this.ue = new UnavailableException(other.ue); +- } +- if (other.isSetTe()) { +- this.te = new TimedOutException(other.te); +- } +- } +- +- public get_range_slices_result deepCopy() { +- return new get_range_slices_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- this.ue = null; +- this.te = null; +- } +- +- public int getSuccessSize() { +- return (this.success == null) ? 0 : this.success.size(); +- } +- +- public java.util.Iterator getSuccessIterator() { +- return (this.success == null) ? 
null : this.success.iterator(); +- } +- +- public void addToSuccess(KeySlice elem) { +- if (this.success == null) { +- this.success = new ArrayList(); +- } +- this.success.add(elem); +- } +- +- public List getSuccess() { +- return this.success; +- } +- +- public get_range_slices_result setSuccess(List success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public get_range_slices_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public UnavailableException getUe() { +- return this.ue; +- } +- +- public get_range_slices_result setUe(UnavailableException ue) { +- this.ue = ue; +- return this; +- } +- +- public void unsetUe() { +- this.ue = null; +- } +- +- /** Returns true if field ue is set (has been assigned a value) and false otherwise */ +- public boolean isSetUe() { +- return this.ue != null; +- } +- +- public void setUeIsSet(boolean value) { +- if (!value) { +- this.ue = null; +- } +- } +- +- public TimedOutException getTe() { +- return this.te; +- } +- +- public get_range_slices_result setTe(TimedOutException te) { +- this.te = te; +- return this; +- } +- +- public void unsetTe() { +- this.te = null; +- } +- +- /** Returns true if field te is set (has been assigned a value) and false otherwise */ +- public 
boolean isSetTe() { +- return this.te != null; +- } +- +- public void setTeIsSet(boolean value) { +- if (!value) { +- this.te = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((List)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- case UE: +- if (value == null) { +- unsetUe(); +- } else { +- setUe((UnavailableException)value); +- } +- break; +- +- case TE: +- if (value == null) { +- unsetTe(); +- } else { +- setTe((TimedOutException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- case UE: +- return getUe(); +- +- case TE: +- return getTe(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- case UE: +- return isSetUe(); +- case TE: +- return isSetTe(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof get_range_slices_result) +- return this.equals((get_range_slices_result)that); +- return false; +- } +- +- public boolean equals(get_range_slices_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- 
if (!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- boolean this_present_ue = true && this.isSetUe(); +- boolean that_present_ue = true && that.isSetUe(); +- if (this_present_ue || that_present_ue) { +- if (!(this_present_ue && that_present_ue)) +- return false; +- if (!this.ue.equals(that.ue)) +- return false; +- } +- +- boolean this_present_te = true && this.isSetTe(); +- boolean that_present_te = true && that.isSetTe(); +- if (this_present_te || that_present_te) { +- if (!(this_present_te && that_present_te)) +- return false; +- if (!this.te.equals(that.te)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- boolean present_ue = true && (isSetUe()); +- builder.append(present_ue); +- if (present_ue) +- builder.append(ue); +- +- boolean present_te = true && (isSetTe()); +- builder.append(present_te); +- if (present_te) +- builder.append(te); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(get_range_slices_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetUe()).compareTo(other.isSetUe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetUe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ue, other.ue); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetTe()).compareTo(other.isSetTe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetTe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.te, other.te); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("get_range_slices_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- if (!first) sb.append(", "); +- 
sb.append("ue:"); +- if (this.ue == null) { +- sb.append("null"); +- } else { +- sb.append(this.ue); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("te:"); +- if (this.te == null) { +- sb.append("null"); +- } else { +- sb.append(this.te); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class get_range_slices_resultStandardSchemeFactory implements SchemeFactory { +- public get_range_slices_resultStandardScheme getScheme() { +- return new get_range_slices_resultStandardScheme(); +- } +- } +- +- private static class get_range_slices_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, get_range_slices_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list276 = iprot.readListBegin(); +- 
struct.success = new ArrayList(_list276.size); +- for (int _i277 = 0; _i277 < _list276.size; ++_i277) +- { +- KeySlice _elem278; +- _elem278 = new KeySlice(); +- _elem278.read(iprot); +- struct.success.add(_elem278); +- } +- iprot.readListEnd(); +- } +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // UE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // TE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, get_range_slices_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); +- for (KeySlice _iter279 : struct.success) +- { +- 
_iter279.write(oprot); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ue != null) { +- oprot.writeFieldBegin(UE_FIELD_DESC); +- struct.ue.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.te != null) { +- oprot.writeFieldBegin(TE_FIELD_DESC); +- struct.te.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class get_range_slices_resultTupleSchemeFactory implements SchemeFactory { +- public get_range_slices_resultTupleScheme getScheme() { +- return new get_range_slices_resultTupleScheme(); +- } +- } +- +- private static class get_range_slices_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, get_range_slices_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetIre()) { +- optionals.set(1); +- } +- if (struct.isSetUe()) { +- optionals.set(2); +- } +- if (struct.isSetTe()) { +- optionals.set(3); +- } +- oprot.writeBitSet(optionals, 4); +- if (struct.isSetSuccess()) { +- { +- oprot.writeI32(struct.success.size()); +- for (KeySlice _iter280 : struct.success) +- { +- _iter280.write(oprot); +- } +- } +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- if (struct.isSetUe()) { +- struct.ue.write(oprot); +- } +- if (struct.isSetTe()) { +- struct.te.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, get_range_slices_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(4); +- if (incoming.get(0)) { +- { +- org.apache.thrift.protocol.TList _list281 
= new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); +- struct.success = new ArrayList(_list281.size); +- for (int _i282 = 0; _i282 < _list281.size; ++_i282) +- { +- KeySlice _elem283; +- _elem283 = new KeySlice(); +- _elem283.read(iprot); +- struct.success.add(_elem283); +- } +- } +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- if (incoming.get(2)) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } +- if (incoming.get(3)) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } +- } +- } +- +- } +- +- public static class get_paged_slice_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_paged_slice_args"); +- +- private static final org.apache.thrift.protocol.TField COLUMN_FAMILY_FIELD_DESC = new org.apache.thrift.protocol.TField("column_family", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField RANGE_FIELD_DESC = new org.apache.thrift.protocol.TField("range", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField START_COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("start_column", org.apache.thrift.protocol.TType.STRING, (short)3); +- private static final org.apache.thrift.protocol.TField CONSISTENCY_LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("consistency_level", org.apache.thrift.protocol.TType.I32, (short)4); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new get_paged_slice_argsStandardSchemeFactory()); +- 
schemes.put(TupleScheme.class, new get_paged_slice_argsTupleSchemeFactory()); +- } +- +- public String column_family; // required +- public KeyRange range; // required +- public ByteBuffer start_column; // required +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel consistency_level; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- COLUMN_FAMILY((short)1, "column_family"), +- RANGE((short)2, "range"), +- START_COLUMN((short)3, "start_column"), +- /** +- * +- * @see ConsistencyLevel +- */ +- CONSISTENCY_LEVEL((short)4, "consistency_level"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // COLUMN_FAMILY +- return COLUMN_FAMILY; +- case 2: // RANGE +- return RANGE; +- case 3: // START_COLUMN +- return START_COLUMN; +- case 4: // CONSISTENCY_LEVEL +- return CONSISTENCY_LEVEL; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.COLUMN_FAMILY, new org.apache.thrift.meta_data.FieldMetaData("column_family", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.RANGE, new org.apache.thrift.meta_data.FieldMetaData("range", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, KeyRange.class))); +- tmpMap.put(_Fields.START_COLUMN, new org.apache.thrift.meta_data.FieldMetaData("start_column", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.CONSISTENCY_LEVEL, new org.apache.thrift.meta_data.FieldMetaData("consistency_level", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ConsistencyLevel.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_paged_slice_args.class, metaDataMap); +- } +- +- public get_paged_slice_args() { +- this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- 
+- public get_paged_slice_args( +- String column_family, +- KeyRange range, +- ByteBuffer start_column, +- ConsistencyLevel consistency_level) +- { +- this(); +- this.column_family = column_family; +- this.range = range; +- this.start_column = start_column; +- this.consistency_level = consistency_level; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public get_paged_slice_args(get_paged_slice_args other) { +- if (other.isSetColumn_family()) { +- this.column_family = other.column_family; +- } +- if (other.isSetRange()) { +- this.range = new KeyRange(other.range); +- } +- if (other.isSetStart_column()) { +- this.start_column = org.apache.thrift.TBaseHelper.copyBinary(other.start_column); +-; +- } +- if (other.isSetConsistency_level()) { +- this.consistency_level = other.consistency_level; +- } +- } +- +- public get_paged_slice_args deepCopy() { +- return new get_paged_slice_args(this); +- } +- +- @Override +- public void clear() { +- this.column_family = null; +- this.range = null; +- this.start_column = null; +- this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- public String getColumn_family() { +- return this.column_family; +- } +- +- public get_paged_slice_args setColumn_family(String column_family) { +- this.column_family = column_family; +- return this; +- } +- +- public void unsetColumn_family() { +- this.column_family = null; +- } +- +- /** Returns true if field column_family is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumn_family() { +- return this.column_family != null; +- } +- +- public void setColumn_familyIsSet(boolean value) { +- if (!value) { +- this.column_family = null; +- } +- } +- +- public KeyRange getRange() { +- return this.range; +- } +- +- public get_paged_slice_args setRange(KeyRange range) { +- this.range = range; +- return this; +- } +- +- public void unsetRange() { +- this.range = null; +- } +- +- /** Returns true if field range is set (has been 
assigned a value) and false otherwise */ +- public boolean isSetRange() { +- return this.range != null; +- } +- +- public void setRangeIsSet(boolean value) { +- if (!value) { +- this.range = null; +- } +- } +- +- public byte[] getStart_column() { +- setStart_column(org.apache.thrift.TBaseHelper.rightSize(start_column)); +- return start_column == null ? null : start_column.array(); +- } +- +- public ByteBuffer bufferForStart_column() { +- return start_column; +- } +- +- public get_paged_slice_args setStart_column(byte[] start_column) { +- setStart_column(start_column == null ? (ByteBuffer)null : ByteBuffer.wrap(start_column)); +- return this; +- } +- +- public get_paged_slice_args setStart_column(ByteBuffer start_column) { +- this.start_column = start_column; +- return this; +- } +- +- public void unsetStart_column() { +- this.start_column = null; +- } +- +- /** Returns true if field start_column is set (has been assigned a value) and false otherwise */ +- public boolean isSetStart_column() { +- return this.start_column != null; +- } +- +- public void setStart_columnIsSet(boolean value) { +- if (!value) { +- this.start_column = null; +- } +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel getConsistency_level() { +- return this.consistency_level; +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public get_paged_slice_args setConsistency_level(ConsistencyLevel consistency_level) { +- this.consistency_level = consistency_level; +- return this; +- } +- +- public void unsetConsistency_level() { +- this.consistency_level = null; +- } +- +- /** Returns true if field consistency_level is set (has been assigned a value) and false otherwise */ +- public boolean isSetConsistency_level() { +- return this.consistency_level != null; +- } +- +- public void setConsistency_levelIsSet(boolean value) { +- if (!value) { +- this.consistency_level = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- 
case COLUMN_FAMILY: +- if (value == null) { +- unsetColumn_family(); +- } else { +- setColumn_family((String)value); +- } +- break; +- +- case RANGE: +- if (value == null) { +- unsetRange(); +- } else { +- setRange((KeyRange)value); +- } +- break; +- +- case START_COLUMN: +- if (value == null) { +- unsetStart_column(); +- } else { +- setStart_column((ByteBuffer)value); +- } +- break; +- +- case CONSISTENCY_LEVEL: +- if (value == null) { +- unsetConsistency_level(); +- } else { +- setConsistency_level((ConsistencyLevel)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case COLUMN_FAMILY: +- return getColumn_family(); +- +- case RANGE: +- return getRange(); +- +- case START_COLUMN: +- return getStart_column(); +- +- case CONSISTENCY_LEVEL: +- return getConsistency_level(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case COLUMN_FAMILY: +- return isSetColumn_family(); +- case RANGE: +- return isSetRange(); +- case START_COLUMN: +- return isSetStart_column(); +- case CONSISTENCY_LEVEL: +- return isSetConsistency_level(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof get_paged_slice_args) +- return this.equals((get_paged_slice_args)that); +- return false; +- } +- +- public boolean equals(get_paged_slice_args that) { +- if (that == null) +- return false; +- +- boolean this_present_column_family = true && this.isSetColumn_family(); +- boolean that_present_column_family = true && that.isSetColumn_family(); +- if (this_present_column_family || that_present_column_family) { +- if (!(this_present_column_family && that_present_column_family)) +- 
return false; +- if (!this.column_family.equals(that.column_family)) +- return false; +- } +- +- boolean this_present_range = true && this.isSetRange(); +- boolean that_present_range = true && that.isSetRange(); +- if (this_present_range || that_present_range) { +- if (!(this_present_range && that_present_range)) +- return false; +- if (!this.range.equals(that.range)) +- return false; +- } +- +- boolean this_present_start_column = true && this.isSetStart_column(); +- boolean that_present_start_column = true && that.isSetStart_column(); +- if (this_present_start_column || that_present_start_column) { +- if (!(this_present_start_column && that_present_start_column)) +- return false; +- if (!this.start_column.equals(that.start_column)) +- return false; +- } +- +- boolean this_present_consistency_level = true && this.isSetConsistency_level(); +- boolean that_present_consistency_level = true && that.isSetConsistency_level(); +- if (this_present_consistency_level || that_present_consistency_level) { +- if (!(this_present_consistency_level && that_present_consistency_level)) +- return false; +- if (!this.consistency_level.equals(that.consistency_level)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_column_family = true && (isSetColumn_family()); +- builder.append(present_column_family); +- if (present_column_family) +- builder.append(column_family); +- +- boolean present_range = true && (isSetRange()); +- builder.append(present_range); +- if (present_range) +- builder.append(range); +- +- boolean present_start_column = true && (isSetStart_column()); +- builder.append(present_start_column); +- if (present_start_column) +- builder.append(start_column); +- +- boolean present_consistency_level = true && (isSetConsistency_level()); +- builder.append(present_consistency_level); +- if (present_consistency_level) +- builder.append(consistency_level.getValue()); +- 
+- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(get_paged_slice_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetColumn_family()).compareTo(other.isSetColumn_family()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumn_family()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_family, other.column_family); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetRange()).compareTo(other.isSetRange()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetRange()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.range, other.range); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetStart_column()).compareTo(other.isSetStart_column()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetStart_column()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.start_column, other.start_column); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetConsistency_level()).compareTo(other.isSetConsistency_level()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetConsistency_level()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.consistency_level, other.consistency_level); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void 
write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("get_paged_slice_args("); +- boolean first = true; +- +- sb.append("column_family:"); +- if (this.column_family == null) { +- sb.append("null"); +- } else { +- sb.append(this.column_family); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("range:"); +- if (this.range == null) { +- sb.append("null"); +- } else { +- sb.append(this.range); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("start_column:"); +- if (this.start_column == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.start_column, sb); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("consistency_level:"); +- if (this.consistency_level == null) { +- sb.append("null"); +- } else { +- sb.append(this.consistency_level); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (column_family == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'column_family' was not present! Struct: " + toString()); +- } +- if (range == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'range' was not present! Struct: " + toString()); +- } +- if (start_column == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'start_column' was not present! Struct: " + toString()); +- } +- if (consistency_level == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'consistency_level' was not present! 
Struct: " + toString()); +- } +- // check for sub-struct validity +- if (range != null) { +- range.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class get_paged_slice_argsStandardSchemeFactory implements SchemeFactory { +- public get_paged_slice_argsStandardScheme getScheme() { +- return new get_paged_slice_argsStandardScheme(); +- } +- } +- +- private static class get_paged_slice_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, get_paged_slice_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // COLUMN_FAMILY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.column_family = iprot.readString(); +- struct.setColumn_familyIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // RANGE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.range = new KeyRange(); +- struct.range.read(iprot); +- struct.setRangeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type); +- } +- break; +- case 3: // START_COLUMN +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.start_column = iprot.readBinary(); +- struct.setStart_columnIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // CONSISTENCY_LEVEL +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, get_paged_slice_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.column_family != null) { +- oprot.writeFieldBegin(COLUMN_FAMILY_FIELD_DESC); +- oprot.writeString(struct.column_family); +- oprot.writeFieldEnd(); +- } +- if (struct.range != null) { +- oprot.writeFieldBegin(RANGE_FIELD_DESC); +- struct.range.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.start_column != null) { +- oprot.writeFieldBegin(START_COLUMN_FIELD_DESC); +- oprot.writeBinary(struct.start_column); +- oprot.writeFieldEnd(); +- } +- if (struct.consistency_level != null) { +- oprot.writeFieldBegin(CONSISTENCY_LEVEL_FIELD_DESC); +- oprot.writeI32(struct.consistency_level.getValue()); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class get_paged_slice_argsTupleSchemeFactory implements SchemeFactory { +- public get_paged_slice_argsTupleScheme getScheme() { +- return new 
get_paged_slice_argsTupleScheme(); +- } +- } +- +- private static class get_paged_slice_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, get_paged_slice_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeString(struct.column_family); +- struct.range.write(oprot); +- oprot.writeBinary(struct.start_column); +- oprot.writeI32(struct.consistency_level.getValue()); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, get_paged_slice_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.column_family = iprot.readString(); +- struct.setColumn_familyIsSet(true); +- struct.range = new KeyRange(); +- struct.range.read(iprot); +- struct.setRangeIsSet(true); +- struct.start_column = iprot.readBinary(); +- struct.setStart_columnIsSet(true); +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } +- } +- +- } +- +- public static class get_paged_slice_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_paged_slice_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField UE_FIELD_DESC = new org.apache.thrift.protocol.TField("ue", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField TE_FIELD_DESC = new 
org.apache.thrift.protocol.TField("te", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new get_paged_slice_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new get_paged_slice_resultTupleSchemeFactory()); +- } +- +- public List success; // required +- public InvalidRequestException ire; // required +- public UnavailableException ue; // required +- public TimedOutException te; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"), +- UE((short)2, "ue"), +- TE((short)3, "te"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- case 2: // UE +- return UE; +- case 3: // TE +- return TE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, KeySlice.class)))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.UE, new org.apache.thrift.meta_data.FieldMetaData("ue", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.TE, new org.apache.thrift.meta_data.FieldMetaData("te", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_paged_slice_result.class, metaDataMap); +- } +- +- public get_paged_slice_result() { +- } +- +- public get_paged_slice_result( +- List success, +- InvalidRequestException ire, 
+- UnavailableException ue, +- TimedOutException te) +- { +- this(); +- this.success = success; +- this.ire = ire; +- this.ue = ue; +- this.te = te; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public get_paged_slice_result(get_paged_slice_result other) { +- if (other.isSetSuccess()) { +- List __this__success = new ArrayList(other.success.size()); +- for (KeySlice other_element : other.success) { +- __this__success.add(new KeySlice(other_element)); +- } +- this.success = __this__success; +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- if (other.isSetUe()) { +- this.ue = new UnavailableException(other.ue); +- } +- if (other.isSetTe()) { +- this.te = new TimedOutException(other.te); +- } +- } +- +- public get_paged_slice_result deepCopy() { +- return new get_paged_slice_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- this.ue = null; +- this.te = null; +- } +- +- public int getSuccessSize() { +- return (this.success == null) ? 0 : this.success.size(); +- } +- +- public java.util.Iterator getSuccessIterator() { +- return (this.success == null) ? 
null : this.success.iterator(); +- } +- +- public void addToSuccess(KeySlice elem) { +- if (this.success == null) { +- this.success = new ArrayList(); +- } +- this.success.add(elem); +- } +- +- public List getSuccess() { +- return this.success; +- } +- +- public get_paged_slice_result setSuccess(List success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public get_paged_slice_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public UnavailableException getUe() { +- return this.ue; +- } +- +- public get_paged_slice_result setUe(UnavailableException ue) { +- this.ue = ue; +- return this; +- } +- +- public void unsetUe() { +- this.ue = null; +- } +- +- /** Returns true if field ue is set (has been assigned a value) and false otherwise */ +- public boolean isSetUe() { +- return this.ue != null; +- } +- +- public void setUeIsSet(boolean value) { +- if (!value) { +- this.ue = null; +- } +- } +- +- public TimedOutException getTe() { +- return this.te; +- } +- +- public get_paged_slice_result setTe(TimedOutException te) { +- this.te = te; +- return this; +- } +- +- public void unsetTe() { +- this.te = null; +- } +- +- /** Returns true if field te is set (has been assigned a value) and false otherwise */ +- public 
boolean isSetTe() { +- return this.te != null; +- } +- +- public void setTeIsSet(boolean value) { +- if (!value) { +- this.te = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((List)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- case UE: +- if (value == null) { +- unsetUe(); +- } else { +- setUe((UnavailableException)value); +- } +- break; +- +- case TE: +- if (value == null) { +- unsetTe(); +- } else { +- setTe((TimedOutException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- case UE: +- return getUe(); +- +- case TE: +- return getTe(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- case UE: +- return isSetUe(); +- case TE: +- return isSetTe(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof get_paged_slice_result) +- return this.equals((get_paged_slice_result)that); +- return false; +- } +- +- public boolean equals(get_paged_slice_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if 
(!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- boolean this_present_ue = true && this.isSetUe(); +- boolean that_present_ue = true && that.isSetUe(); +- if (this_present_ue || that_present_ue) { +- if (!(this_present_ue && that_present_ue)) +- return false; +- if (!this.ue.equals(that.ue)) +- return false; +- } +- +- boolean this_present_te = true && this.isSetTe(); +- boolean that_present_te = true && that.isSetTe(); +- if (this_present_te || that_present_te) { +- if (!(this_present_te && that_present_te)) +- return false; +- if (!this.te.equals(that.te)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- boolean present_ue = true && (isSetUe()); +- builder.append(present_ue); +- if (present_ue) +- builder.append(ue); +- +- boolean present_te = true && (isSetTe()); +- builder.append(present_te); +- if (present_te) +- builder.append(te); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(get_paged_slice_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetUe()).compareTo(other.isSetUe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetUe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ue, other.ue); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetTe()).compareTo(other.isSetTe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetTe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.te, other.te); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("get_paged_slice_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- if (!first) sb.append(", "); +- 
sb.append("ue:"); +- if (this.ue == null) { +- sb.append("null"); +- } else { +- sb.append(this.ue); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("te:"); +- if (this.te == null) { +- sb.append("null"); +- } else { +- sb.append(this.te); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class get_paged_slice_resultStandardSchemeFactory implements SchemeFactory { +- public get_paged_slice_resultStandardScheme getScheme() { +- return new get_paged_slice_resultStandardScheme(); +- } +- } +- +- private static class get_paged_slice_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, get_paged_slice_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list284 = iprot.readListBegin(); +- struct.success = 
new ArrayList(_list284.size); +- for (int _i285 = 0; _i285 < _list284.size; ++_i285) +- { +- KeySlice _elem286; +- _elem286 = new KeySlice(); +- _elem286.read(iprot); +- struct.success.add(_elem286); +- } +- iprot.readListEnd(); +- } +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // UE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // TE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, get_paged_slice_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); +- for (KeySlice _iter287 : struct.success) +- { +- _iter287.write(oprot); +- } 
+- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ue != null) { +- oprot.writeFieldBegin(UE_FIELD_DESC); +- struct.ue.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.te != null) { +- oprot.writeFieldBegin(TE_FIELD_DESC); +- struct.te.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class get_paged_slice_resultTupleSchemeFactory implements SchemeFactory { +- public get_paged_slice_resultTupleScheme getScheme() { +- return new get_paged_slice_resultTupleScheme(); +- } +- } +- +- private static class get_paged_slice_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, get_paged_slice_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetIre()) { +- optionals.set(1); +- } +- if (struct.isSetUe()) { +- optionals.set(2); +- } +- if (struct.isSetTe()) { +- optionals.set(3); +- } +- oprot.writeBitSet(optionals, 4); +- if (struct.isSetSuccess()) { +- { +- oprot.writeI32(struct.success.size()); +- for (KeySlice _iter288 : struct.success) +- { +- _iter288.write(oprot); +- } +- } +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- if (struct.isSetUe()) { +- struct.ue.write(oprot); +- } +- if (struct.isSetTe()) { +- struct.te.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, get_paged_slice_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(4); +- if (incoming.get(0)) { +- { +- org.apache.thrift.protocol.TList _list289 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); +- struct.success = new ArrayList(_list289.size); +- for (int _i290 = 0; _i290 < _list289.size; ++_i290) +- { +- KeySlice _elem291; +- _elem291 = new KeySlice(); +- _elem291.read(iprot); +- struct.success.add(_elem291); +- } +- } +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- if (incoming.get(2)) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } +- if (incoming.get(3)) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } +- } +- } +- +- } +- +- public static class get_indexed_slices_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_indexed_slices_args"); +- +- private static final org.apache.thrift.protocol.TField COLUMN_PARENT_FIELD_DESC = new org.apache.thrift.protocol.TField("column_parent", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField INDEX_CLAUSE_FIELD_DESC = new org.apache.thrift.protocol.TField("index_clause", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField COLUMN_PREDICATE_FIELD_DESC = new org.apache.thrift.protocol.TField("column_predicate", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- private static final org.apache.thrift.protocol.TField CONSISTENCY_LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("consistency_level", org.apache.thrift.protocol.TType.I32, (short)4); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new 
get_indexed_slices_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new get_indexed_slices_argsTupleSchemeFactory()); +- } +- +- public ColumnParent column_parent; // required +- public IndexClause index_clause; // required +- public SlicePredicate column_predicate; // required +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel consistency_level; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- COLUMN_PARENT((short)1, "column_parent"), +- INDEX_CLAUSE((short)2, "index_clause"), +- COLUMN_PREDICATE((short)3, "column_predicate"), +- /** +- * +- * @see ConsistencyLevel +- */ +- CONSISTENCY_LEVEL((short)4, "consistency_level"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // COLUMN_PARENT +- return COLUMN_PARENT; +- case 2: // INDEX_CLAUSE +- return INDEX_CLAUSE; +- case 3: // COLUMN_PREDICATE +- return COLUMN_PREDICATE; +- case 4: // CONSISTENCY_LEVEL +- return CONSISTENCY_LEVEL; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.COLUMN_PARENT, new org.apache.thrift.meta_data.FieldMetaData("column_parent", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnParent.class))); +- tmpMap.put(_Fields.INDEX_CLAUSE, new org.apache.thrift.meta_data.FieldMetaData("index_clause", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IndexClause.class))); +- tmpMap.put(_Fields.COLUMN_PREDICATE, new org.apache.thrift.meta_data.FieldMetaData("column_predicate", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SlicePredicate.class))); +- tmpMap.put(_Fields.CONSISTENCY_LEVEL, new org.apache.thrift.meta_data.FieldMetaData("consistency_level", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ConsistencyLevel.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_indexed_slices_args.class, metaDataMap); +- } +- +- public get_indexed_slices_args() { +- this.consistency_level = 
org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- public get_indexed_slices_args( +- ColumnParent column_parent, +- IndexClause index_clause, +- SlicePredicate column_predicate, +- ConsistencyLevel consistency_level) +- { +- this(); +- this.column_parent = column_parent; +- this.index_clause = index_clause; +- this.column_predicate = column_predicate; +- this.consistency_level = consistency_level; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public get_indexed_slices_args(get_indexed_slices_args other) { +- if (other.isSetColumn_parent()) { +- this.column_parent = new ColumnParent(other.column_parent); +- } +- if (other.isSetIndex_clause()) { +- this.index_clause = new IndexClause(other.index_clause); +- } +- if (other.isSetColumn_predicate()) { +- this.column_predicate = new SlicePredicate(other.column_predicate); +- } +- if (other.isSetConsistency_level()) { +- this.consistency_level = other.consistency_level; +- } +- } +- +- public get_indexed_slices_args deepCopy() { +- return new get_indexed_slices_args(this); +- } +- +- @Override +- public void clear() { +- this.column_parent = null; +- this.index_clause = null; +- this.column_predicate = null; +- this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- public ColumnParent getColumn_parent() { +- return this.column_parent; +- } +- +- public get_indexed_slices_args setColumn_parent(ColumnParent column_parent) { +- this.column_parent = column_parent; +- return this; +- } +- +- public void unsetColumn_parent() { +- this.column_parent = null; +- } +- +- /** Returns true if field column_parent is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumn_parent() { +- return this.column_parent != null; +- } +- +- public void setColumn_parentIsSet(boolean value) { +- if (!value) { +- this.column_parent = null; +- } +- } +- +- public IndexClause getIndex_clause() { +- return this.index_clause; +- } +- +- public 
get_indexed_slices_args setIndex_clause(IndexClause index_clause) { +- this.index_clause = index_clause; +- return this; +- } +- +- public void unsetIndex_clause() { +- this.index_clause = null; +- } +- +- /** Returns true if field index_clause is set (has been assigned a value) and false otherwise */ +- public boolean isSetIndex_clause() { +- return this.index_clause != null; +- } +- +- public void setIndex_clauseIsSet(boolean value) { +- if (!value) { +- this.index_clause = null; +- } +- } +- +- public SlicePredicate getColumn_predicate() { +- return this.column_predicate; +- } +- +- public get_indexed_slices_args setColumn_predicate(SlicePredicate column_predicate) { +- this.column_predicate = column_predicate; +- return this; +- } +- +- public void unsetColumn_predicate() { +- this.column_predicate = null; +- } +- +- /** Returns true if field column_predicate is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumn_predicate() { +- return this.column_predicate != null; +- } +- +- public void setColumn_predicateIsSet(boolean value) { +- if (!value) { +- this.column_predicate = null; +- } +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel getConsistency_level() { +- return this.consistency_level; +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public get_indexed_slices_args setConsistency_level(ConsistencyLevel consistency_level) { +- this.consistency_level = consistency_level; +- return this; +- } +- +- public void unsetConsistency_level() { +- this.consistency_level = null; +- } +- +- /** Returns true if field consistency_level is set (has been assigned a value) and false otherwise */ +- public boolean isSetConsistency_level() { +- return this.consistency_level != null; +- } +- +- public void setConsistency_levelIsSet(boolean value) { +- if (!value) { +- this.consistency_level = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case 
COLUMN_PARENT: +- if (value == null) { +- unsetColumn_parent(); +- } else { +- setColumn_parent((ColumnParent)value); +- } +- break; +- +- case INDEX_CLAUSE: +- if (value == null) { +- unsetIndex_clause(); +- } else { +- setIndex_clause((IndexClause)value); +- } +- break; +- +- case COLUMN_PREDICATE: +- if (value == null) { +- unsetColumn_predicate(); +- } else { +- setColumn_predicate((SlicePredicate)value); +- } +- break; +- +- case CONSISTENCY_LEVEL: +- if (value == null) { +- unsetConsistency_level(); +- } else { +- setConsistency_level((ConsistencyLevel)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case COLUMN_PARENT: +- return getColumn_parent(); +- +- case INDEX_CLAUSE: +- return getIndex_clause(); +- +- case COLUMN_PREDICATE: +- return getColumn_predicate(); +- +- case CONSISTENCY_LEVEL: +- return getConsistency_level(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case COLUMN_PARENT: +- return isSetColumn_parent(); +- case INDEX_CLAUSE: +- return isSetIndex_clause(); +- case COLUMN_PREDICATE: +- return isSetColumn_predicate(); +- case CONSISTENCY_LEVEL: +- return isSetConsistency_level(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof get_indexed_slices_args) +- return this.equals((get_indexed_slices_args)that); +- return false; +- } +- +- public boolean equals(get_indexed_slices_args that) { +- if (that == null) +- return false; +- +- boolean this_present_column_parent = true && this.isSetColumn_parent(); +- boolean that_present_column_parent = true && that.isSetColumn_parent(); +- if (this_present_column_parent || 
that_present_column_parent) { +- if (!(this_present_column_parent && that_present_column_parent)) +- return false; +- if (!this.column_parent.equals(that.column_parent)) +- return false; +- } +- +- boolean this_present_index_clause = true && this.isSetIndex_clause(); +- boolean that_present_index_clause = true && that.isSetIndex_clause(); +- if (this_present_index_clause || that_present_index_clause) { +- if (!(this_present_index_clause && that_present_index_clause)) +- return false; +- if (!this.index_clause.equals(that.index_clause)) +- return false; +- } +- +- boolean this_present_column_predicate = true && this.isSetColumn_predicate(); +- boolean that_present_column_predicate = true && that.isSetColumn_predicate(); +- if (this_present_column_predicate || that_present_column_predicate) { +- if (!(this_present_column_predicate && that_present_column_predicate)) +- return false; +- if (!this.column_predicate.equals(that.column_predicate)) +- return false; +- } +- +- boolean this_present_consistency_level = true && this.isSetConsistency_level(); +- boolean that_present_consistency_level = true && that.isSetConsistency_level(); +- if (this_present_consistency_level || that_present_consistency_level) { +- if (!(this_present_consistency_level && that_present_consistency_level)) +- return false; +- if (!this.consistency_level.equals(that.consistency_level)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_column_parent = true && (isSetColumn_parent()); +- builder.append(present_column_parent); +- if (present_column_parent) +- builder.append(column_parent); +- +- boolean present_index_clause = true && (isSetIndex_clause()); +- builder.append(present_index_clause); +- if (present_index_clause) +- builder.append(index_clause); +- +- boolean present_column_predicate = true && (isSetColumn_predicate()); +- builder.append(present_column_predicate); +- if 
(present_column_predicate) +- builder.append(column_predicate); +- +- boolean present_consistency_level = true && (isSetConsistency_level()); +- builder.append(present_consistency_level); +- if (present_consistency_level) +- builder.append(consistency_level.getValue()); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(get_indexed_slices_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetColumn_parent()).compareTo(other.isSetColumn_parent()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumn_parent()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_parent, other.column_parent); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIndex_clause()).compareTo(other.isSetIndex_clause()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIndex_clause()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.index_clause, other.index_clause); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetColumn_predicate()).compareTo(other.isSetColumn_predicate()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumn_predicate()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_predicate, other.column_predicate); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetConsistency_level()).compareTo(other.isSetConsistency_level()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetConsistency_level()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.consistency_level, other.consistency_level); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- 
return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("get_indexed_slices_args("); +- boolean first = true; +- +- sb.append("column_parent:"); +- if (this.column_parent == null) { +- sb.append("null"); +- } else { +- sb.append(this.column_parent); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("index_clause:"); +- if (this.index_clause == null) { +- sb.append("null"); +- } else { +- sb.append(this.index_clause); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("column_predicate:"); +- if (this.column_predicate == null) { +- sb.append("null"); +- } else { +- sb.append(this.column_predicate); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("consistency_level:"); +- if (this.consistency_level == null) { +- sb.append("null"); +- } else { +- sb.append(this.consistency_level); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (column_parent == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'column_parent' was not present! Struct: " + toString()); +- } +- if (index_clause == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'index_clause' was not present! Struct: " + toString()); +- } +- if (column_predicate == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'column_predicate' was not present! 
Struct: " + toString()); +- } +- if (consistency_level == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'consistency_level' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- if (column_parent != null) { +- column_parent.validate(); +- } +- if (index_clause != null) { +- index_clause.validate(); +- } +- if (column_predicate != null) { +- column_predicate.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class get_indexed_slices_argsStandardSchemeFactory implements SchemeFactory { +- public get_indexed_slices_argsStandardScheme getScheme() { +- return new get_indexed_slices_argsStandardScheme(); +- } +- } +- +- private static class get_indexed_slices_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, get_indexed_slices_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // COLUMN_PARENT +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.column_parent = new ColumnParent(); +- struct.column_parent.read(iprot); +- 
struct.setColumn_parentIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // INDEX_CLAUSE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.index_clause = new IndexClause(); +- struct.index_clause.read(iprot); +- struct.setIndex_clauseIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // COLUMN_PREDICATE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.column_predicate = new SlicePredicate(); +- struct.column_predicate.read(iprot); +- struct.setColumn_predicateIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // CONSISTENCY_LEVEL +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, get_indexed_slices_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.column_parent != null) { +- oprot.writeFieldBegin(COLUMN_PARENT_FIELD_DESC); +- struct.column_parent.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.index_clause != null) { +- oprot.writeFieldBegin(INDEX_CLAUSE_FIELD_DESC); +- struct.index_clause.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.column_predicate != null) { +- 
oprot.writeFieldBegin(COLUMN_PREDICATE_FIELD_DESC); +- struct.column_predicate.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.consistency_level != null) { +- oprot.writeFieldBegin(CONSISTENCY_LEVEL_FIELD_DESC); +- oprot.writeI32(struct.consistency_level.getValue()); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class get_indexed_slices_argsTupleSchemeFactory implements SchemeFactory { +- public get_indexed_slices_argsTupleScheme getScheme() { +- return new get_indexed_slices_argsTupleScheme(); +- } +- } +- +- private static class get_indexed_slices_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, get_indexed_slices_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- struct.column_parent.write(oprot); +- struct.index_clause.write(oprot); +- struct.column_predicate.write(oprot); +- oprot.writeI32(struct.consistency_level.getValue()); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, get_indexed_slices_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.column_parent = new ColumnParent(); +- struct.column_parent.read(iprot); +- struct.setColumn_parentIsSet(true); +- struct.index_clause = new IndexClause(); +- struct.index_clause.read(iprot); +- struct.setIndex_clauseIsSet(true); +- struct.column_predicate = new SlicePredicate(); +- struct.column_predicate.read(iprot); +- struct.setColumn_predicateIsSet(true); +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } +- } +- +- } +- +- public static class get_indexed_slices_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("get_indexed_slices_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField UE_FIELD_DESC = new org.apache.thrift.protocol.TField("ue", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField TE_FIELD_DESC = new org.apache.thrift.protocol.TField("te", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new get_indexed_slices_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new get_indexed_slices_resultTupleSchemeFactory()); +- } +- +- public List success; // required +- public InvalidRequestException ire; // required +- public UnavailableException ue; // required +- public TimedOutException te; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"), +- UE((short)2, "ue"), +- TE((short)3, "te"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. 
+- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- case 2: // UE +- return UE; +- case 3: // TE +- return TE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, KeySlice.class)))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.UE, new 
org.apache.thrift.meta_data.FieldMetaData("ue", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.TE, new org.apache.thrift.meta_data.FieldMetaData("te", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_indexed_slices_result.class, metaDataMap); +- } +- +- public get_indexed_slices_result() { +- } +- +- public get_indexed_slices_result( +- List success, +- InvalidRequestException ire, +- UnavailableException ue, +- TimedOutException te) +- { +- this(); +- this.success = success; +- this.ire = ire; +- this.ue = ue; +- this.te = te; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public get_indexed_slices_result(get_indexed_slices_result other) { +- if (other.isSetSuccess()) { +- List __this__success = new ArrayList(other.success.size()); +- for (KeySlice other_element : other.success) { +- __this__success.add(new KeySlice(other_element)); +- } +- this.success = __this__success; +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- if (other.isSetUe()) { +- this.ue = new UnavailableException(other.ue); +- } +- if (other.isSetTe()) { +- this.te = new TimedOutException(other.te); +- } +- } +- +- public get_indexed_slices_result deepCopy() { +- return new get_indexed_slices_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- this.ue = null; +- this.te = null; +- } +- +- public int getSuccessSize() { +- return (this.success == null) ? 0 : this.success.size(); +- } +- +- public java.util.Iterator getSuccessIterator() { +- return (this.success == null) ? 
null : this.success.iterator(); +- } +- +- public void addToSuccess(KeySlice elem) { +- if (this.success == null) { +- this.success = new ArrayList(); +- } +- this.success.add(elem); +- } +- +- public List getSuccess() { +- return this.success; +- } +- +- public get_indexed_slices_result setSuccess(List success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public get_indexed_slices_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public UnavailableException getUe() { +- return this.ue; +- } +- +- public get_indexed_slices_result setUe(UnavailableException ue) { +- this.ue = ue; +- return this; +- } +- +- public void unsetUe() { +- this.ue = null; +- } +- +- /** Returns true if field ue is set (has been assigned a value) and false otherwise */ +- public boolean isSetUe() { +- return this.ue != null; +- } +- +- public void setUeIsSet(boolean value) { +- if (!value) { +- this.ue = null; +- } +- } +- +- public TimedOutException getTe() { +- return this.te; +- } +- +- public get_indexed_slices_result setTe(TimedOutException te) { +- this.te = te; +- return this; +- } +- +- public void unsetTe() { +- this.te = null; +- } +- +- /** Returns true if field te is set (has been assigned a value) and false otherwise */ +- 
public boolean isSetTe() { +- return this.te != null; +- } +- +- public void setTeIsSet(boolean value) { +- if (!value) { +- this.te = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((List)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- case UE: +- if (value == null) { +- unsetUe(); +- } else { +- setUe((UnavailableException)value); +- } +- break; +- +- case TE: +- if (value == null) { +- unsetTe(); +- } else { +- setTe((TimedOutException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- case UE: +- return getUe(); +- +- case TE: +- return getTe(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- case UE: +- return isSetUe(); +- case TE: +- return isSetTe(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof get_indexed_slices_result) +- return this.equals((get_indexed_slices_result)that); +- return false; +- } +- +- public boolean equals(get_indexed_slices_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- 
return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- boolean this_present_ue = true && this.isSetUe(); +- boolean that_present_ue = true && that.isSetUe(); +- if (this_present_ue || that_present_ue) { +- if (!(this_present_ue && that_present_ue)) +- return false; +- if (!this.ue.equals(that.ue)) +- return false; +- } +- +- boolean this_present_te = true && this.isSetTe(); +- boolean that_present_te = true && that.isSetTe(); +- if (this_present_te || that_present_te) { +- if (!(this_present_te && that_present_te)) +- return false; +- if (!this.te.equals(that.te)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- boolean present_ue = true && (isSetUe()); +- builder.append(present_ue); +- if (present_ue) +- builder.append(ue); +- +- boolean present_te = true && (isSetTe()); +- builder.append(present_te); +- if (present_te) +- builder.append(te); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(get_indexed_slices_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetUe()).compareTo(other.isSetUe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetUe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ue, other.ue); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetTe()).compareTo(other.isSetTe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetTe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.te, other.te); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("get_indexed_slices_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- if (!first) sb.append(", "); +- 
sb.append("ue:"); +- if (this.ue == null) { +- sb.append("null"); +- } else { +- sb.append(this.ue); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("te:"); +- if (this.te == null) { +- sb.append("null"); +- } else { +- sb.append(this.te); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class get_indexed_slices_resultStandardSchemeFactory implements SchemeFactory { +- public get_indexed_slices_resultStandardScheme getScheme() { +- return new get_indexed_slices_resultStandardScheme(); +- } +- } +- +- private static class get_indexed_slices_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, get_indexed_slices_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list292 = iprot.readListBegin(); +- 
struct.success = new ArrayList(_list292.size); +- for (int _i293 = 0; _i293 < _list292.size; ++_i293) +- { +- KeySlice _elem294; +- _elem294 = new KeySlice(); +- _elem294.read(iprot); +- struct.success.add(_elem294); +- } +- iprot.readListEnd(); +- } +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // UE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // TE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, get_indexed_slices_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); +- for (KeySlice _iter295 : struct.success) +- { +- 
_iter295.write(oprot); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ue != null) { +- oprot.writeFieldBegin(UE_FIELD_DESC); +- struct.ue.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.te != null) { +- oprot.writeFieldBegin(TE_FIELD_DESC); +- struct.te.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class get_indexed_slices_resultTupleSchemeFactory implements SchemeFactory { +- public get_indexed_slices_resultTupleScheme getScheme() { +- return new get_indexed_slices_resultTupleScheme(); +- } +- } +- +- private static class get_indexed_slices_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, get_indexed_slices_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetIre()) { +- optionals.set(1); +- } +- if (struct.isSetUe()) { +- optionals.set(2); +- } +- if (struct.isSetTe()) { +- optionals.set(3); +- } +- oprot.writeBitSet(optionals, 4); +- if (struct.isSetSuccess()) { +- { +- oprot.writeI32(struct.success.size()); +- for (KeySlice _iter296 : struct.success) +- { +- _iter296.write(oprot); +- } +- } +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- if (struct.isSetUe()) { +- struct.ue.write(oprot); +- } +- if (struct.isSetTe()) { +- struct.te.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, get_indexed_slices_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(4); +- if (incoming.get(0)) { +- { +- 
org.apache.thrift.protocol.TList _list297 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); +- struct.success = new ArrayList(_list297.size); +- for (int _i298 = 0; _i298 < _list297.size; ++_i298) +- { +- KeySlice _elem299; +- _elem299 = new KeySlice(); +- _elem299.read(iprot); +- struct.success.add(_elem299); +- } +- } +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- if (incoming.get(2)) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } +- if (incoming.get(3)) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } +- } +- } +- +- } +- +- public static class insert_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("insert_args"); +- +- private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField COLUMN_PARENT_FIELD_DESC = new org.apache.thrift.protocol.TField("column_parent", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- private static final org.apache.thrift.protocol.TField CONSISTENCY_LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("consistency_level", org.apache.thrift.protocol.TType.I32, (short)4); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new insert_argsStandardSchemeFactory()); +- 
schemes.put(TupleScheme.class, new insert_argsTupleSchemeFactory()); +- } +- +- public ByteBuffer key; // required +- public ColumnParent column_parent; // required +- public Column column; // required +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel consistency_level; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- KEY((short)1, "key"), +- COLUMN_PARENT((short)2, "column_parent"), +- COLUMN((short)3, "column"), +- /** +- * +- * @see ConsistencyLevel +- */ +- CONSISTENCY_LEVEL((short)4, "consistency_level"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // KEY +- return KEY; +- case 2: // COLUMN_PARENT +- return COLUMN_PARENT; +- case 3: // COLUMN +- return COLUMN; +- case 4: // CONSISTENCY_LEVEL +- return CONSISTENCY_LEVEL; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.KEY, new org.apache.thrift.meta_data.FieldMetaData("key", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.COLUMN_PARENT, new org.apache.thrift.meta_data.FieldMetaData("column_parent", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnParent.class))); +- tmpMap.put(_Fields.COLUMN, new org.apache.thrift.meta_data.FieldMetaData("column", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Column.class))); +- tmpMap.put(_Fields.CONSISTENCY_LEVEL, new org.apache.thrift.meta_data.FieldMetaData("consistency_level", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ConsistencyLevel.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(insert_args.class, metaDataMap); +- } +- +- public insert_args() { +- this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- public 
insert_args( +- ByteBuffer key, +- ColumnParent column_parent, +- Column column, +- ConsistencyLevel consistency_level) +- { +- this(); +- this.key = key; +- this.column_parent = column_parent; +- this.column = column; +- this.consistency_level = consistency_level; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public insert_args(insert_args other) { +- if (other.isSetKey()) { +- this.key = org.apache.thrift.TBaseHelper.copyBinary(other.key); +-; +- } +- if (other.isSetColumn_parent()) { +- this.column_parent = new ColumnParent(other.column_parent); +- } +- if (other.isSetColumn()) { +- this.column = new Column(other.column); +- } +- if (other.isSetConsistency_level()) { +- this.consistency_level = other.consistency_level; +- } +- } +- +- public insert_args deepCopy() { +- return new insert_args(this); +- } +- +- @Override +- public void clear() { +- this.key = null; +- this.column_parent = null; +- this.column = null; +- this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- public byte[] getKey() { +- setKey(org.apache.thrift.TBaseHelper.rightSize(key)); +- return key == null ? null : key.array(); +- } +- +- public ByteBuffer bufferForKey() { +- return key; +- } +- +- public insert_args setKey(byte[] key) { +- setKey(key == null ? 
(ByteBuffer)null : ByteBuffer.wrap(key)); +- return this; +- } +- +- public insert_args setKey(ByteBuffer key) { +- this.key = key; +- return this; +- } +- +- public void unsetKey() { +- this.key = null; +- } +- +- /** Returns true if field key is set (has been assigned a value) and false otherwise */ +- public boolean isSetKey() { +- return this.key != null; +- } +- +- public void setKeyIsSet(boolean value) { +- if (!value) { +- this.key = null; +- } +- } +- +- public ColumnParent getColumn_parent() { +- return this.column_parent; +- } +- +- public insert_args setColumn_parent(ColumnParent column_parent) { +- this.column_parent = column_parent; +- return this; +- } +- +- public void unsetColumn_parent() { +- this.column_parent = null; +- } +- +- /** Returns true if field column_parent is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumn_parent() { +- return this.column_parent != null; +- } +- +- public void setColumn_parentIsSet(boolean value) { +- if (!value) { +- this.column_parent = null; +- } +- } +- +- public Column getColumn() { +- return this.column; +- } +- +- public insert_args setColumn(Column column) { +- this.column = column; +- return this; +- } +- +- public void unsetColumn() { +- this.column = null; +- } +- +- /** Returns true if field column is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumn() { +- return this.column != null; +- } +- +- public void setColumnIsSet(boolean value) { +- if (!value) { +- this.column = null; +- } +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel getConsistency_level() { +- return this.consistency_level; +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public insert_args setConsistency_level(ConsistencyLevel consistency_level) { +- this.consistency_level = consistency_level; +- return this; +- } +- +- public void unsetConsistency_level() { +- this.consistency_level = null; +- } +- +- /** Returns true if field 
consistency_level is set (has been assigned a value) and false otherwise */ +- public boolean isSetConsistency_level() { +- return this.consistency_level != null; +- } +- +- public void setConsistency_levelIsSet(boolean value) { +- if (!value) { +- this.consistency_level = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case KEY: +- if (value == null) { +- unsetKey(); +- } else { +- setKey((ByteBuffer)value); +- } +- break; +- +- case COLUMN_PARENT: +- if (value == null) { +- unsetColumn_parent(); +- } else { +- setColumn_parent((ColumnParent)value); +- } +- break; +- +- case COLUMN: +- if (value == null) { +- unsetColumn(); +- } else { +- setColumn((Column)value); +- } +- break; +- +- case CONSISTENCY_LEVEL: +- if (value == null) { +- unsetConsistency_level(); +- } else { +- setConsistency_level((ConsistencyLevel)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case KEY: +- return getKey(); +- +- case COLUMN_PARENT: +- return getColumn_parent(); +- +- case COLUMN: +- return getColumn(); +- +- case CONSISTENCY_LEVEL: +- return getConsistency_level(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case KEY: +- return isSetKey(); +- case COLUMN_PARENT: +- return isSetColumn_parent(); +- case COLUMN: +- return isSetColumn(); +- case CONSISTENCY_LEVEL: +- return isSetConsistency_level(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof insert_args) +- return this.equals((insert_args)that); +- return false; +- } +- +- public boolean equals(insert_args that) { +- if (that == null) +- return 
false; +- +- boolean this_present_key = true && this.isSetKey(); +- boolean that_present_key = true && that.isSetKey(); +- if (this_present_key || that_present_key) { +- if (!(this_present_key && that_present_key)) +- return false; +- if (!this.key.equals(that.key)) +- return false; +- } +- +- boolean this_present_column_parent = true && this.isSetColumn_parent(); +- boolean that_present_column_parent = true && that.isSetColumn_parent(); +- if (this_present_column_parent || that_present_column_parent) { +- if (!(this_present_column_parent && that_present_column_parent)) +- return false; +- if (!this.column_parent.equals(that.column_parent)) +- return false; +- } +- +- boolean this_present_column = true && this.isSetColumn(); +- boolean that_present_column = true && that.isSetColumn(); +- if (this_present_column || that_present_column) { +- if (!(this_present_column && that_present_column)) +- return false; +- if (!this.column.equals(that.column)) +- return false; +- } +- +- boolean this_present_consistency_level = true && this.isSetConsistency_level(); +- boolean that_present_consistency_level = true && that.isSetConsistency_level(); +- if (this_present_consistency_level || that_present_consistency_level) { +- if (!(this_present_consistency_level && that_present_consistency_level)) +- return false; +- if (!this.consistency_level.equals(that.consistency_level)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_key = true && (isSetKey()); +- builder.append(present_key); +- if (present_key) +- builder.append(key); +- +- boolean present_column_parent = true && (isSetColumn_parent()); +- builder.append(present_column_parent); +- if (present_column_parent) +- builder.append(column_parent); +- +- boolean present_column = true && (isSetColumn()); +- builder.append(present_column); +- if (present_column) +- builder.append(column); +- +- boolean 
present_consistency_level = true && (isSetConsistency_level()); +- builder.append(present_consistency_level); +- if (present_consistency_level) +- builder.append(consistency_level.getValue()); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(insert_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetKey()).compareTo(other.isSetKey()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKey()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key, other.key); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetColumn_parent()).compareTo(other.isSetColumn_parent()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumn_parent()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_parent, other.column_parent); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetColumn()).compareTo(other.isSetColumn()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumn()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column, other.column); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetConsistency_level()).compareTo(other.isSetConsistency_level()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetConsistency_level()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.consistency_level, other.consistency_level); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws 
org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("insert_args("); +- boolean first = true; +- +- sb.append("key:"); +- if (this.key == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.key, sb); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("column_parent:"); +- if (this.column_parent == null) { +- sb.append("null"); +- } else { +- sb.append(this.column_parent); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("column:"); +- if (this.column == null) { +- sb.append("null"); +- } else { +- sb.append(this.column); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("consistency_level:"); +- if (this.consistency_level == null) { +- sb.append("null"); +- } else { +- sb.append(this.consistency_level); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (key == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'key' was not present! Struct: " + toString()); +- } +- if (column_parent == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'column_parent' was not present! Struct: " + toString()); +- } +- if (column == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'column' was not present! Struct: " + toString()); +- } +- if (consistency_level == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'consistency_level' was not present! 
Struct: " + toString()); +- } +- // check for sub-struct validity +- if (column_parent != null) { +- column_parent.validate(); +- } +- if (column != null) { +- column.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class insert_argsStandardSchemeFactory implements SchemeFactory { +- public insert_argsStandardScheme getScheme() { +- return new insert_argsStandardScheme(); +- } +- } +- +- private static class insert_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, insert_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // KEY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.key = iprot.readBinary(); +- struct.setKeyIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // COLUMN_PARENT +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.column_parent = new ColumnParent(); +- struct.column_parent.read(iprot); +- struct.setColumn_parentIsSet(true); +- } else { +- 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // COLUMN +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.column = new Column(); +- struct.column.read(iprot); +- struct.setColumnIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // CONSISTENCY_LEVEL +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, insert_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.key != null) { +- oprot.writeFieldBegin(KEY_FIELD_DESC); +- oprot.writeBinary(struct.key); +- oprot.writeFieldEnd(); +- } +- if (struct.column_parent != null) { +- oprot.writeFieldBegin(COLUMN_PARENT_FIELD_DESC); +- struct.column_parent.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.column != null) { +- oprot.writeFieldBegin(COLUMN_FIELD_DESC); +- struct.column.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.consistency_level != null) { +- oprot.writeFieldBegin(CONSISTENCY_LEVEL_FIELD_DESC); +- oprot.writeI32(struct.consistency_level.getValue()); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class insert_argsTupleSchemeFactory implements SchemeFactory { +- public insert_argsTupleScheme getScheme() { +- return new 
insert_argsTupleScheme(); +- } +- } +- +- private static class insert_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, insert_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeBinary(struct.key); +- struct.column_parent.write(oprot); +- struct.column.write(oprot); +- oprot.writeI32(struct.consistency_level.getValue()); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, insert_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.key = iprot.readBinary(); +- struct.setKeyIsSet(true); +- struct.column_parent = new ColumnParent(); +- struct.column_parent.read(iprot); +- struct.setColumn_parentIsSet(true); +- struct.column = new Column(); +- struct.column.read(iprot); +- struct.setColumnIsSet(true); +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } +- } +- +- } +- +- public static class insert_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("insert_result"); +- +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField UE_FIELD_DESC = new org.apache.thrift.protocol.TField("ue", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField TE_FIELD_DESC = new org.apache.thrift.protocol.TField("te", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new 
insert_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new insert_resultTupleSchemeFactory()); +- } +- +- public InvalidRequestException ire; // required +- public UnavailableException ue; // required +- public TimedOutException te; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- IRE((short)1, "ire"), +- UE((short)2, "ue"), +- TE((short)3, "te"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // IRE +- return IRE; +- case 2: // UE +- return UE; +- case 3: // TE +- return TE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.UE, new org.apache.thrift.meta_data.FieldMetaData("ue", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.TE, new org.apache.thrift.meta_data.FieldMetaData("te", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(insert_result.class, metaDataMap); +- } +- +- public insert_result() { +- } +- +- public insert_result( +- InvalidRequestException ire, +- UnavailableException ue, +- TimedOutException te) +- { +- this(); +- this.ire = ire; +- this.ue = ue; +- this.te = te; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public insert_result(insert_result other) { +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- if (other.isSetUe()) { +- this.ue = new UnavailableException(other.ue); +- } +- if (other.isSetTe()) { +- this.te = new TimedOutException(other.te); +- } +- } +- +- public insert_result deepCopy() { +- return new insert_result(this); +- } +- +- @Override +- public void clear() { +- this.ire = null; +- this.ue = null; +- this.te = null; +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public insert_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public UnavailableException getUe() { +- return this.ue; +- } +- +- public insert_result setUe(UnavailableException ue) { +- this.ue = ue; +- return this; +- } +- +- public void unsetUe() { +- this.ue = null; +- } +- +- /** Returns true if field ue is set (has been assigned a value) and false otherwise */ +- public boolean isSetUe() { +- return this.ue != null; +- } +- +- public void setUeIsSet(boolean value) { +- if (!value) { +- this.ue = null; +- } +- } +- +- public TimedOutException getTe() { +- return this.te; +- } +- +- public insert_result setTe(TimedOutException te) { +- this.te = te; +- return this; +- } +- +- public void unsetTe() { +- this.te = null; +- } +- +- /** Returns true if field te is set (has been assigned a value) and false otherwise */ +- public boolean isSetTe() { +- return this.te != null; +- } +- +- public void setTeIsSet(boolean value) { +- if (!value) { +- this.te = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case IRE: +- 
if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- case UE: +- if (value == null) { +- unsetUe(); +- } else { +- setUe((UnavailableException)value); +- } +- break; +- +- case TE: +- if (value == null) { +- unsetTe(); +- } else { +- setTe((TimedOutException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case IRE: +- return getIre(); +- +- case UE: +- return getUe(); +- +- case TE: +- return getTe(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case IRE: +- return isSetIre(); +- case UE: +- return isSetUe(); +- case TE: +- return isSetTe(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof insert_result) +- return this.equals((insert_result)that); +- return false; +- } +- +- public boolean equals(insert_result that) { +- if (that == null) +- return false; +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- boolean this_present_ue = true && this.isSetUe(); +- boolean that_present_ue = true && that.isSetUe(); +- if (this_present_ue || that_present_ue) { +- if (!(this_present_ue && that_present_ue)) +- return false; +- if (!this.ue.equals(that.ue)) +- return false; +- } +- +- boolean this_present_te = true && this.isSetTe(); +- boolean that_present_te = true && that.isSetTe(); +- if (this_present_te || that_present_te) { +- if (!(this_present_te && 
that_present_te)) +- return false; +- if (!this.te.equals(that.te)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- boolean present_ue = true && (isSetUe()); +- builder.append(present_ue); +- if (present_ue) +- builder.append(ue); +- +- boolean present_te = true && (isSetTe()); +- builder.append(present_te); +- if (present_te) +- builder.append(te); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(insert_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetUe()).compareTo(other.isSetUe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetUe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ue, other.ue); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetTe()).compareTo(other.isSetTe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetTe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.te, other.te); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- 
schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("insert_result("); +- boolean first = true; +- +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ue:"); +- if (this.ue == null) { +- sb.append("null"); +- } else { +- sb.append(this.ue); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("te:"); +- if (this.te == null) { +- sb.append("null"); +- } else { +- sb.append(this.te); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class insert_resultStandardSchemeFactory implements SchemeFactory { +- public insert_resultStandardScheme getScheme() { +- return new insert_resultStandardScheme(); +- } +- } +- +- private static class insert_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, 
insert_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // UE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // TE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, insert_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ue != null) { +- oprot.writeFieldBegin(UE_FIELD_DESC); +- struct.ue.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.te != null) { +- oprot.writeFieldBegin(TE_FIELD_DESC); +- struct.te.write(oprot); +- 
oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class insert_resultTupleSchemeFactory implements SchemeFactory { +- public insert_resultTupleScheme getScheme() { +- return new insert_resultTupleScheme(); +- } +- } +- +- private static class insert_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, insert_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetIre()) { +- optionals.set(0); +- } +- if (struct.isSetUe()) { +- optionals.set(1); +- } +- if (struct.isSetTe()) { +- optionals.set(2); +- } +- oprot.writeBitSet(optionals, 3); +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- if (struct.isSetUe()) { +- struct.ue.write(oprot); +- } +- if (struct.isSetTe()) { +- struct.te.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, insert_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(3); +- if (incoming.get(0)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } +- if (incoming.get(2)) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } +- } +- } +- +- } +- +- public static class add_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_args"); +- +- private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", 
org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField COLUMN_PARENT_FIELD_DESC = new org.apache.thrift.protocol.TField("column_parent", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- private static final org.apache.thrift.protocol.TField CONSISTENCY_LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("consistency_level", org.apache.thrift.protocol.TType.I32, (short)4); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new add_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new add_argsTupleSchemeFactory()); +- } +- +- public ByteBuffer key; // required +- public ColumnParent column_parent; // required +- public CounterColumn column; // required +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel consistency_level; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- KEY((short)1, "key"), +- COLUMN_PARENT((short)2, "column_parent"), +- COLUMN((short)3, "column"), +- /** +- * +- * @see ConsistencyLevel +- */ +- CONSISTENCY_LEVEL((short)4, "consistency_level"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. 
+- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // KEY +- return KEY; +- case 2: // COLUMN_PARENT +- return COLUMN_PARENT; +- case 3: // COLUMN +- return COLUMN; +- case 4: // CONSISTENCY_LEVEL +- return CONSISTENCY_LEVEL; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.KEY, new org.apache.thrift.meta_data.FieldMetaData("key", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.COLUMN_PARENT, new org.apache.thrift.meta_data.FieldMetaData("column_parent", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnParent.class))); +- tmpMap.put(_Fields.COLUMN, new 
org.apache.thrift.meta_data.FieldMetaData("column", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CounterColumn.class))); +- tmpMap.put(_Fields.CONSISTENCY_LEVEL, new org.apache.thrift.meta_data.FieldMetaData("consistency_level", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ConsistencyLevel.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_args.class, metaDataMap); +- } +- +- public add_args() { +- this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- public add_args( +- ByteBuffer key, +- ColumnParent column_parent, +- CounterColumn column, +- ConsistencyLevel consistency_level) +- { +- this(); +- this.key = key; +- this.column_parent = column_parent; +- this.column = column; +- this.consistency_level = consistency_level; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public add_args(add_args other) { +- if (other.isSetKey()) { +- this.key = org.apache.thrift.TBaseHelper.copyBinary(other.key); +-; +- } +- if (other.isSetColumn_parent()) { +- this.column_parent = new ColumnParent(other.column_parent); +- } +- if (other.isSetColumn()) { +- this.column = new CounterColumn(other.column); +- } +- if (other.isSetConsistency_level()) { +- this.consistency_level = other.consistency_level; +- } +- } +- +- public add_args deepCopy() { +- return new add_args(this); +- } +- +- @Override +- public void clear() { +- this.key = null; +- this.column_parent = null; +- this.column = null; +- this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- public byte[] getKey() { +- setKey(org.apache.thrift.TBaseHelper.rightSize(key)); +- return key == null ? 
null : key.array(); +- } +- +- public ByteBuffer bufferForKey() { +- return key; +- } +- +- public add_args setKey(byte[] key) { +- setKey(key == null ? (ByteBuffer)null : ByteBuffer.wrap(key)); +- return this; +- } +- +- public add_args setKey(ByteBuffer key) { +- this.key = key; +- return this; +- } +- +- public void unsetKey() { +- this.key = null; +- } +- +- /** Returns true if field key is set (has been assigned a value) and false otherwise */ +- public boolean isSetKey() { +- return this.key != null; +- } +- +- public void setKeyIsSet(boolean value) { +- if (!value) { +- this.key = null; +- } +- } +- +- public ColumnParent getColumn_parent() { +- return this.column_parent; +- } +- +- public add_args setColumn_parent(ColumnParent column_parent) { +- this.column_parent = column_parent; +- return this; +- } +- +- public void unsetColumn_parent() { +- this.column_parent = null; +- } +- +- /** Returns true if field column_parent is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumn_parent() { +- return this.column_parent != null; +- } +- +- public void setColumn_parentIsSet(boolean value) { +- if (!value) { +- this.column_parent = null; +- } +- } +- +- public CounterColumn getColumn() { +- return this.column; +- } +- +- public add_args setColumn(CounterColumn column) { +- this.column = column; +- return this; +- } +- +- public void unsetColumn() { +- this.column = null; +- } +- +- /** Returns true if field column is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumn() { +- return this.column != null; +- } +- +- public void setColumnIsSet(boolean value) { +- if (!value) { +- this.column = null; +- } +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel getConsistency_level() { +- return this.consistency_level; +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public add_args setConsistency_level(ConsistencyLevel consistency_level) { +- this.consistency_level = 
consistency_level; +- return this; +- } +- +- public void unsetConsistency_level() { +- this.consistency_level = null; +- } +- +- /** Returns true if field consistency_level is set (has been assigned a value) and false otherwise */ +- public boolean isSetConsistency_level() { +- return this.consistency_level != null; +- } +- +- public void setConsistency_levelIsSet(boolean value) { +- if (!value) { +- this.consistency_level = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case KEY: +- if (value == null) { +- unsetKey(); +- } else { +- setKey((ByteBuffer)value); +- } +- break; +- +- case COLUMN_PARENT: +- if (value == null) { +- unsetColumn_parent(); +- } else { +- setColumn_parent((ColumnParent)value); +- } +- break; +- +- case COLUMN: +- if (value == null) { +- unsetColumn(); +- } else { +- setColumn((CounterColumn)value); +- } +- break; +- +- case CONSISTENCY_LEVEL: +- if (value == null) { +- unsetConsistency_level(); +- } else { +- setConsistency_level((ConsistencyLevel)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case KEY: +- return getKey(); +- +- case COLUMN_PARENT: +- return getColumn_parent(); +- +- case COLUMN: +- return getColumn(); +- +- case CONSISTENCY_LEVEL: +- return getConsistency_level(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case KEY: +- return isSetKey(); +- case COLUMN_PARENT: +- return isSetColumn_parent(); +- case COLUMN: +- return isSetColumn(); +- case CONSISTENCY_LEVEL: +- return isSetConsistency_level(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that 
instanceof add_args) +- return this.equals((add_args)that); +- return false; +- } +- +- public boolean equals(add_args that) { +- if (that == null) +- return false; +- +- boolean this_present_key = true && this.isSetKey(); +- boolean that_present_key = true && that.isSetKey(); +- if (this_present_key || that_present_key) { +- if (!(this_present_key && that_present_key)) +- return false; +- if (!this.key.equals(that.key)) +- return false; +- } +- +- boolean this_present_column_parent = true && this.isSetColumn_parent(); +- boolean that_present_column_parent = true && that.isSetColumn_parent(); +- if (this_present_column_parent || that_present_column_parent) { +- if (!(this_present_column_parent && that_present_column_parent)) +- return false; +- if (!this.column_parent.equals(that.column_parent)) +- return false; +- } +- +- boolean this_present_column = true && this.isSetColumn(); +- boolean that_present_column = true && that.isSetColumn(); +- if (this_present_column || that_present_column) { +- if (!(this_present_column && that_present_column)) +- return false; +- if (!this.column.equals(that.column)) +- return false; +- } +- +- boolean this_present_consistency_level = true && this.isSetConsistency_level(); +- boolean that_present_consistency_level = true && that.isSetConsistency_level(); +- if (this_present_consistency_level || that_present_consistency_level) { +- if (!(this_present_consistency_level && that_present_consistency_level)) +- return false; +- if (!this.consistency_level.equals(that.consistency_level)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_key = true && (isSetKey()); +- builder.append(present_key); +- if (present_key) +- builder.append(key); +- +- boolean present_column_parent = true && (isSetColumn_parent()); +- builder.append(present_column_parent); +- if (present_column_parent) +- builder.append(column_parent); +- +- boolean 
present_column = true && (isSetColumn()); +- builder.append(present_column); +- if (present_column) +- builder.append(column); +- +- boolean present_consistency_level = true && (isSetConsistency_level()); +- builder.append(present_consistency_level); +- if (present_consistency_level) +- builder.append(consistency_level.getValue()); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(add_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetKey()).compareTo(other.isSetKey()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKey()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key, other.key); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetColumn_parent()).compareTo(other.isSetColumn_parent()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumn_parent()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_parent, other.column_parent); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetColumn()).compareTo(other.isSetColumn()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumn()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column, other.column); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetConsistency_level()).compareTo(other.isSetConsistency_level()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetConsistency_level()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.consistency_level, other.consistency_level); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields 
fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("add_args("); +- boolean first = true; +- +- sb.append("key:"); +- if (this.key == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.key, sb); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("column_parent:"); +- if (this.column_parent == null) { +- sb.append("null"); +- } else { +- sb.append(this.column_parent); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("column:"); +- if (this.column == null) { +- sb.append("null"); +- } else { +- sb.append(this.column); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("consistency_level:"); +- if (this.consistency_level == null) { +- sb.append("null"); +- } else { +- sb.append(this.consistency_level); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (key == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'key' was not present! Struct: " + toString()); +- } +- if (column_parent == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'column_parent' was not present! Struct: " + toString()); +- } +- if (column == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'column' was not present! 
Struct: " + toString()); +- } +- if (consistency_level == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'consistency_level' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- if (column_parent != null) { +- column_parent.validate(); +- } +- if (column != null) { +- column.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class add_argsStandardSchemeFactory implements SchemeFactory { +- public add_argsStandardScheme getScheme() { +- return new add_argsStandardScheme(); +- } +- } +- +- private static class add_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, add_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // KEY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.key = iprot.readBinary(); +- struct.setKeyIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // COLUMN_PARENT +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- 
struct.column_parent = new ColumnParent(); +- struct.column_parent.read(iprot); +- struct.setColumn_parentIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // COLUMN +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.column = new CounterColumn(); +- struct.column.read(iprot); +- struct.setColumnIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // CONSISTENCY_LEVEL +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, add_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.key != null) { +- oprot.writeFieldBegin(KEY_FIELD_DESC); +- oprot.writeBinary(struct.key); +- oprot.writeFieldEnd(); +- } +- if (struct.column_parent != null) { +- oprot.writeFieldBegin(COLUMN_PARENT_FIELD_DESC); +- struct.column_parent.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.column != null) { +- oprot.writeFieldBegin(COLUMN_FIELD_DESC); +- struct.column.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.consistency_level != null) { +- oprot.writeFieldBegin(CONSISTENCY_LEVEL_FIELD_DESC); +- oprot.writeI32(struct.consistency_level.getValue()); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- 
private static class add_argsTupleSchemeFactory implements SchemeFactory { +- public add_argsTupleScheme getScheme() { +- return new add_argsTupleScheme(); +- } +- } +- +- private static class add_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, add_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeBinary(struct.key); +- struct.column_parent.write(oprot); +- struct.column.write(oprot); +- oprot.writeI32(struct.consistency_level.getValue()); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, add_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.key = iprot.readBinary(); +- struct.setKeyIsSet(true); +- struct.column_parent = new ColumnParent(); +- struct.column_parent.read(iprot); +- struct.setColumn_parentIsSet(true); +- struct.column = new CounterColumn(); +- struct.column.read(iprot); +- struct.setColumnIsSet(true); +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } +- } +- +- } +- +- public static class add_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_result"); +- +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField UE_FIELD_DESC = new org.apache.thrift.protocol.TField("ue", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField TE_FIELD_DESC = new org.apache.thrift.protocol.TField("te", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- +- private static final Map, 
SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new add_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new add_resultTupleSchemeFactory()); +- } +- +- public InvalidRequestException ire; // required +- public UnavailableException ue; // required +- public TimedOutException te; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- IRE((short)1, "ire"), +- UE((short)2, "ue"), +- TE((short)3, "te"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // IRE +- return IRE; +- case 2: // UE +- return UE; +- case 3: // TE +- return TE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.UE, new org.apache.thrift.meta_data.FieldMetaData("ue", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.TE, new org.apache.thrift.meta_data.FieldMetaData("te", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_result.class, metaDataMap); +- } +- +- public add_result() { +- } +- +- public add_result( +- InvalidRequestException ire, +- UnavailableException ue, +- TimedOutException te) +- { +- this(); +- this.ire = ire; +- this.ue = ue; +- this.te = te; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public add_result(add_result other) { +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- if (other.isSetUe()) { +- this.ue = new UnavailableException(other.ue); +- } +- if (other.isSetTe()) { +- this.te = new TimedOutException(other.te); +- } +- } +- +- public add_result deepCopy() { +- return new add_result(this); +- } +- +- @Override +- public void clear() { +- this.ire = null; +- this.ue = null; +- this.te = null; +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public add_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public UnavailableException getUe() { +- return this.ue; +- } +- +- public add_result setUe(UnavailableException ue) { +- this.ue = ue; +- return this; +- } +- +- public void unsetUe() { +- this.ue = null; +- } +- +- /** Returns true if field ue is set (has been assigned a value) and false otherwise */ +- public boolean isSetUe() { +- return this.ue != null; +- } +- +- public void setUeIsSet(boolean value) { +- if (!value) { +- this.ue = null; +- } +- } +- +- public TimedOutException getTe() { +- return this.te; +- } +- +- public add_result setTe(TimedOutException te) { +- this.te = te; +- return this; +- } +- +- public void unsetTe() { +- this.te = null; +- } +- +- /** Returns true if field te is set (has been assigned a value) and false otherwise */ +- public boolean isSetTe() { +- return this.te != null; +- } +- +- public void setTeIsSet(boolean value) { +- if (!value) { +- this.te = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case IRE: +- if (value == null) { 
+- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- case UE: +- if (value == null) { +- unsetUe(); +- } else { +- setUe((UnavailableException)value); +- } +- break; +- +- case TE: +- if (value == null) { +- unsetTe(); +- } else { +- setTe((TimedOutException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case IRE: +- return getIre(); +- +- case UE: +- return getUe(); +- +- case TE: +- return getTe(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case IRE: +- return isSetIre(); +- case UE: +- return isSetUe(); +- case TE: +- return isSetTe(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof add_result) +- return this.equals((add_result)that); +- return false; +- } +- +- public boolean equals(add_result that) { +- if (that == null) +- return false; +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- boolean this_present_ue = true && this.isSetUe(); +- boolean that_present_ue = true && that.isSetUe(); +- if (this_present_ue || that_present_ue) { +- if (!(this_present_ue && that_present_ue)) +- return false; +- if (!this.ue.equals(that.ue)) +- return false; +- } +- +- boolean this_present_te = true && this.isSetTe(); +- boolean that_present_te = true && that.isSetTe(); +- if (this_present_te || that_present_te) { +- if (!(this_present_te && that_present_te)) +- return 
false; +- if (!this.te.equals(that.te)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- boolean present_ue = true && (isSetUe()); +- builder.append(present_ue); +- if (present_ue) +- builder.append(ue); +- +- boolean present_te = true && (isSetTe()); +- builder.append(present_te); +- if (present_te) +- builder.append(te); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(add_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetUe()).compareTo(other.isSetUe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetUe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ue, other.ue); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetTe()).compareTo(other.isSetTe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetTe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.te, other.te); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); 
+- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("add_result("); +- boolean first = true; +- +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ue:"); +- if (this.ue == null) { +- sb.append("null"); +- } else { +- sb.append(this.ue); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("te:"); +- if (this.te == null) { +- sb.append("null"); +- } else { +- sb.append(this.te); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class add_resultStandardSchemeFactory implements SchemeFactory { +- public add_resultStandardScheme getScheme() { +- return new add_resultStandardScheme(); +- } +- } +- +- private static class add_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, add_result struct) throws org.apache.thrift.TException { +- 
org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // UE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // TE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, add_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ue != null) { +- oprot.writeFieldBegin(UE_FIELD_DESC); +- struct.ue.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.te != null) { +- oprot.writeFieldBegin(TE_FIELD_DESC); +- struct.te.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- 
oprot.writeStructEnd(); +- } +- +- } +- +- private static class add_resultTupleSchemeFactory implements SchemeFactory { +- public add_resultTupleScheme getScheme() { +- return new add_resultTupleScheme(); +- } +- } +- +- private static class add_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, add_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetIre()) { +- optionals.set(0); +- } +- if (struct.isSetUe()) { +- optionals.set(1); +- } +- if (struct.isSetTe()) { +- optionals.set(2); +- } +- oprot.writeBitSet(optionals, 3); +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- if (struct.isSetUe()) { +- struct.ue.write(oprot); +- } +- if (struct.isSetTe()) { +- struct.te.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, add_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(3); +- if (incoming.get(0)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } +- if (incoming.get(2)) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } +- } +- } +- +- } +- +- public static class cas_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("cas_args"); +- +- private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField 
COLUMN_FAMILY_FIELD_DESC = new org.apache.thrift.protocol.TField("column_family", org.apache.thrift.protocol.TType.STRING, (short)2); +- private static final org.apache.thrift.protocol.TField EXPECTED_FIELD_DESC = new org.apache.thrift.protocol.TField("expected", org.apache.thrift.protocol.TType.LIST, (short)3); +- private static final org.apache.thrift.protocol.TField UPDATES_FIELD_DESC = new org.apache.thrift.protocol.TField("updates", org.apache.thrift.protocol.TType.LIST, (short)4); +- private static final org.apache.thrift.protocol.TField SERIAL_CONSISTENCY_LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("serial_consistency_level", org.apache.thrift.protocol.TType.I32, (short)5); +- private static final org.apache.thrift.protocol.TField COMMIT_CONSISTENCY_LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("commit_consistency_level", org.apache.thrift.protocol.TType.I32, (short)6); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new cas_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new cas_argsTupleSchemeFactory()); +- } +- +- public ByteBuffer key; // required +- public String column_family; // required +- public List expected; // required +- public List updates; // required +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel serial_consistency_level; // required +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel commit_consistency_level; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- KEY((short)1, "key"), +- COLUMN_FAMILY((short)2, "column_family"), +- EXPECTED((short)3, "expected"), +- UPDATES((short)4, "updates"), +- /** +- * +- * @see ConsistencyLevel +- */ +- SERIAL_CONSISTENCY_LEVEL((short)5, "serial_consistency_level"), +- /** +- * +- * @see ConsistencyLevel +- */ +- COMMIT_CONSISTENCY_LEVEL((short)6, "commit_consistency_level"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // KEY +- return KEY; +- case 2: // COLUMN_FAMILY +- return COLUMN_FAMILY; +- case 3: // EXPECTED +- return EXPECTED; +- case 4: // UPDATES +- return UPDATES; +- case 5: // SERIAL_CONSISTENCY_LEVEL +- return SERIAL_CONSISTENCY_LEVEL; +- case 6: // COMMIT_CONSISTENCY_LEVEL +- return COMMIT_CONSISTENCY_LEVEL; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.KEY, new org.apache.thrift.meta_data.FieldMetaData("key", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.COLUMN_FAMILY, new org.apache.thrift.meta_data.FieldMetaData("column_family", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.EXPECTED, new org.apache.thrift.meta_data.FieldMetaData("expected", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Column.class)))); +- tmpMap.put(_Fields.UPDATES, new org.apache.thrift.meta_data.FieldMetaData("updates", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Column.class)))); +- tmpMap.put(_Fields.SERIAL_CONSISTENCY_LEVEL, new org.apache.thrift.meta_data.FieldMetaData("serial_consistency_level", 
org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ConsistencyLevel.class))); +- tmpMap.put(_Fields.COMMIT_CONSISTENCY_LEVEL, new org.apache.thrift.meta_data.FieldMetaData("commit_consistency_level", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ConsistencyLevel.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(cas_args.class, metaDataMap); +- } +- +- public cas_args() { +- this.serial_consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.SERIAL; +- +- this.commit_consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.QUORUM; +- +- } +- +- public cas_args( +- ByteBuffer key, +- String column_family, +- List expected, +- List updates, +- ConsistencyLevel serial_consistency_level, +- ConsistencyLevel commit_consistency_level) +- { +- this(); +- this.key = key; +- this.column_family = column_family; +- this.expected = expected; +- this.updates = updates; +- this.serial_consistency_level = serial_consistency_level; +- this.commit_consistency_level = commit_consistency_level; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public cas_args(cas_args other) { +- if (other.isSetKey()) { +- this.key = org.apache.thrift.TBaseHelper.copyBinary(other.key); +-; +- } +- if (other.isSetColumn_family()) { +- this.column_family = other.column_family; +- } +- if (other.isSetExpected()) { +- List __this__expected = new ArrayList(other.expected.size()); +- for (Column other_element : other.expected) { +- __this__expected.add(new Column(other_element)); +- } +- this.expected = __this__expected; +- } +- if (other.isSetUpdates()) { +- List __this__updates = new ArrayList(other.updates.size()); +- for (Column other_element : other.updates) { +- __this__updates.add(new Column(other_element)); +- } +- this.updates = __this__updates; +- } +- if (other.isSetSerial_consistency_level()) { +- this.serial_consistency_level = other.serial_consistency_level; +- } +- if (other.isSetCommit_consistency_level()) { +- this.commit_consistency_level = other.commit_consistency_level; +- } +- } +- +- public cas_args deepCopy() { +- return new cas_args(this); +- } +- +- @Override +- public void clear() { +- this.key = null; +- this.column_family = null; +- this.expected = null; +- this.updates = null; +- this.serial_consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.SERIAL; +- +- this.commit_consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.QUORUM; +- +- } +- +- public byte[] getKey() { +- setKey(org.apache.thrift.TBaseHelper.rightSize(key)); +- return key == null ? null : key.array(); +- } +- +- public ByteBuffer bufferForKey() { +- return key; +- } +- +- public cas_args setKey(byte[] key) { +- setKey(key == null ? 
(ByteBuffer)null : ByteBuffer.wrap(key)); +- return this; +- } +- +- public cas_args setKey(ByteBuffer key) { +- this.key = key; +- return this; +- } +- +- public void unsetKey() { +- this.key = null; +- } +- +- /** Returns true if field key is set (has been assigned a value) and false otherwise */ +- public boolean isSetKey() { +- return this.key != null; +- } +- +- public void setKeyIsSet(boolean value) { +- if (!value) { +- this.key = null; +- } +- } +- +- public String getColumn_family() { +- return this.column_family; +- } +- +- public cas_args setColumn_family(String column_family) { +- this.column_family = column_family; +- return this; +- } +- +- public void unsetColumn_family() { +- this.column_family = null; +- } +- +- /** Returns true if field column_family is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumn_family() { +- return this.column_family != null; +- } +- +- public void setColumn_familyIsSet(boolean value) { +- if (!value) { +- this.column_family = null; +- } +- } +- +- public int getExpectedSize() { +- return (this.expected == null) ? 0 : this.expected.size(); +- } +- +- public java.util.Iterator getExpectedIterator() { +- return (this.expected == null) ? 
null : this.expected.iterator(); +- } +- +- public void addToExpected(Column elem) { +- if (this.expected == null) { +- this.expected = new ArrayList(); +- } +- this.expected.add(elem); +- } +- +- public List getExpected() { +- return this.expected; +- } +- +- public cas_args setExpected(List expected) { +- this.expected = expected; +- return this; +- } +- +- public void unsetExpected() { +- this.expected = null; +- } +- +- /** Returns true if field expected is set (has been assigned a value) and false otherwise */ +- public boolean isSetExpected() { +- return this.expected != null; +- } +- +- public void setExpectedIsSet(boolean value) { +- if (!value) { +- this.expected = null; +- } +- } +- +- public int getUpdatesSize() { +- return (this.updates == null) ? 0 : this.updates.size(); +- } +- +- public java.util.Iterator getUpdatesIterator() { +- return (this.updates == null) ? null : this.updates.iterator(); +- } +- +- public void addToUpdates(Column elem) { +- if (this.updates == null) { +- this.updates = new ArrayList(); +- } +- this.updates.add(elem); +- } +- +- public List getUpdates() { +- return this.updates; +- } +- +- public cas_args setUpdates(List updates) { +- this.updates = updates; +- return this; +- } +- +- public void unsetUpdates() { +- this.updates = null; +- } +- +- /** Returns true if field updates is set (has been assigned a value) and false otherwise */ +- public boolean isSetUpdates() { +- return this.updates != null; +- } +- +- public void setUpdatesIsSet(boolean value) { +- if (!value) { +- this.updates = null; +- } +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel getSerial_consistency_level() { +- return this.serial_consistency_level; +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public cas_args setSerial_consistency_level(ConsistencyLevel serial_consistency_level) { +- this.serial_consistency_level = serial_consistency_level; +- return this; +- } +- +- public void unsetSerial_consistency_level() { 
+- this.serial_consistency_level = null; +- } +- +- /** Returns true if field serial_consistency_level is set (has been assigned a value) and false otherwise */ +- public boolean isSetSerial_consistency_level() { +- return this.serial_consistency_level != null; +- } +- +- public void setSerial_consistency_levelIsSet(boolean value) { +- if (!value) { +- this.serial_consistency_level = null; +- } +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel getCommit_consistency_level() { +- return this.commit_consistency_level; +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public cas_args setCommit_consistency_level(ConsistencyLevel commit_consistency_level) { +- this.commit_consistency_level = commit_consistency_level; +- return this; +- } +- +- public void unsetCommit_consistency_level() { +- this.commit_consistency_level = null; +- } +- +- /** Returns true if field commit_consistency_level is set (has been assigned a value) and false otherwise */ +- public boolean isSetCommit_consistency_level() { +- return this.commit_consistency_level != null; +- } +- +- public void setCommit_consistency_levelIsSet(boolean value) { +- if (!value) { +- this.commit_consistency_level = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case KEY: +- if (value == null) { +- unsetKey(); +- } else { +- setKey((ByteBuffer)value); +- } +- break; +- +- case COLUMN_FAMILY: +- if (value == null) { +- unsetColumn_family(); +- } else { +- setColumn_family((String)value); +- } +- break; +- +- case EXPECTED: +- if (value == null) { +- unsetExpected(); +- } else { +- setExpected((List)value); +- } +- break; +- +- case UPDATES: +- if (value == null) { +- unsetUpdates(); +- } else { +- setUpdates((List)value); +- } +- break; +- +- case SERIAL_CONSISTENCY_LEVEL: +- if (value == null) { +- unsetSerial_consistency_level(); +- } else { +- setSerial_consistency_level((ConsistencyLevel)value); +- } +- break; +- +- case 
COMMIT_CONSISTENCY_LEVEL: +- if (value == null) { +- unsetCommit_consistency_level(); +- } else { +- setCommit_consistency_level((ConsistencyLevel)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case KEY: +- return getKey(); +- +- case COLUMN_FAMILY: +- return getColumn_family(); +- +- case EXPECTED: +- return getExpected(); +- +- case UPDATES: +- return getUpdates(); +- +- case SERIAL_CONSISTENCY_LEVEL: +- return getSerial_consistency_level(); +- +- case COMMIT_CONSISTENCY_LEVEL: +- return getCommit_consistency_level(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case KEY: +- return isSetKey(); +- case COLUMN_FAMILY: +- return isSetColumn_family(); +- case EXPECTED: +- return isSetExpected(); +- case UPDATES: +- return isSetUpdates(); +- case SERIAL_CONSISTENCY_LEVEL: +- return isSetSerial_consistency_level(); +- case COMMIT_CONSISTENCY_LEVEL: +- return isSetCommit_consistency_level(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof cas_args) +- return this.equals((cas_args)that); +- return false; +- } +- +- public boolean equals(cas_args that) { +- if (that == null) +- return false; +- +- boolean this_present_key = true && this.isSetKey(); +- boolean that_present_key = true && that.isSetKey(); +- if (this_present_key || that_present_key) { +- if (!(this_present_key && that_present_key)) +- return false; +- if (!this.key.equals(that.key)) +- return false; +- } +- +- boolean this_present_column_family = true && this.isSetColumn_family(); +- boolean that_present_column_family = true && that.isSetColumn_family(); +- if 
(this_present_column_family || that_present_column_family) { +- if (!(this_present_column_family && that_present_column_family)) +- return false; +- if (!this.column_family.equals(that.column_family)) +- return false; +- } +- +- boolean this_present_expected = true && this.isSetExpected(); +- boolean that_present_expected = true && that.isSetExpected(); +- if (this_present_expected || that_present_expected) { +- if (!(this_present_expected && that_present_expected)) +- return false; +- if (!this.expected.equals(that.expected)) +- return false; +- } +- +- boolean this_present_updates = true && this.isSetUpdates(); +- boolean that_present_updates = true && that.isSetUpdates(); +- if (this_present_updates || that_present_updates) { +- if (!(this_present_updates && that_present_updates)) +- return false; +- if (!this.updates.equals(that.updates)) +- return false; +- } +- +- boolean this_present_serial_consistency_level = true && this.isSetSerial_consistency_level(); +- boolean that_present_serial_consistency_level = true && that.isSetSerial_consistency_level(); +- if (this_present_serial_consistency_level || that_present_serial_consistency_level) { +- if (!(this_present_serial_consistency_level && that_present_serial_consistency_level)) +- return false; +- if (!this.serial_consistency_level.equals(that.serial_consistency_level)) +- return false; +- } +- +- boolean this_present_commit_consistency_level = true && this.isSetCommit_consistency_level(); +- boolean that_present_commit_consistency_level = true && that.isSetCommit_consistency_level(); +- if (this_present_commit_consistency_level || that_present_commit_consistency_level) { +- if (!(this_present_commit_consistency_level && that_present_commit_consistency_level)) +- return false; +- if (!this.commit_consistency_level.equals(that.commit_consistency_level)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean 
present_key = true && (isSetKey()); +- builder.append(present_key); +- if (present_key) +- builder.append(key); +- +- boolean present_column_family = true && (isSetColumn_family()); +- builder.append(present_column_family); +- if (present_column_family) +- builder.append(column_family); +- +- boolean present_expected = true && (isSetExpected()); +- builder.append(present_expected); +- if (present_expected) +- builder.append(expected); +- +- boolean present_updates = true && (isSetUpdates()); +- builder.append(present_updates); +- if (present_updates) +- builder.append(updates); +- +- boolean present_serial_consistency_level = true && (isSetSerial_consistency_level()); +- builder.append(present_serial_consistency_level); +- if (present_serial_consistency_level) +- builder.append(serial_consistency_level.getValue()); +- +- boolean present_commit_consistency_level = true && (isSetCommit_consistency_level()); +- builder.append(present_commit_consistency_level); +- if (present_commit_consistency_level) +- builder.append(commit_consistency_level.getValue()); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(cas_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetKey()).compareTo(other.isSetKey()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKey()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key, other.key); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetColumn_family()).compareTo(other.isSetColumn_family()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumn_family()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_family, other.column_family); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- 
lastComparison = Boolean.valueOf(isSetExpected()).compareTo(other.isSetExpected()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetExpected()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.expected, other.expected); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetUpdates()).compareTo(other.isSetUpdates()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetUpdates()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.updates, other.updates); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetSerial_consistency_level()).compareTo(other.isSetSerial_consistency_level()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSerial_consistency_level()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.serial_consistency_level, other.serial_consistency_level); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetCommit_consistency_level()).compareTo(other.isSetCommit_consistency_level()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetCommit_consistency_level()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.commit_consistency_level, other.commit_consistency_level); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- 
StringBuilder sb = new StringBuilder("cas_args("); +- boolean first = true; +- +- sb.append("key:"); +- if (this.key == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.key, sb); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("column_family:"); +- if (this.column_family == null) { +- sb.append("null"); +- } else { +- sb.append(this.column_family); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("expected:"); +- if (this.expected == null) { +- sb.append("null"); +- } else { +- sb.append(this.expected); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("updates:"); +- if (this.updates == null) { +- sb.append("null"); +- } else { +- sb.append(this.updates); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("serial_consistency_level:"); +- if (this.serial_consistency_level == null) { +- sb.append("null"); +- } else { +- sb.append(this.serial_consistency_level); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("commit_consistency_level:"); +- if (this.commit_consistency_level == null) { +- sb.append("null"); +- } else { +- sb.append(this.commit_consistency_level); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (key == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'key' was not present! Struct: " + toString()); +- } +- if (column_family == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'column_family' was not present! Struct: " + toString()); +- } +- if (serial_consistency_level == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'serial_consistency_level' was not present! 
Struct: " + toString()); +- } +- if (commit_consistency_level == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'commit_consistency_level' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class cas_argsStandardSchemeFactory implements SchemeFactory { +- public cas_argsStandardScheme getScheme() { +- return new cas_argsStandardScheme(); +- } +- } +- +- private static class cas_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, cas_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // KEY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.key = iprot.readBinary(); +- struct.setKeyIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // COLUMN_FAMILY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.column_family = iprot.readString(); +- struct.setColumn_familyIsSet(true); +- } else { +- 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // EXPECTED +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list300 = iprot.readListBegin(); +- struct.expected = new ArrayList(_list300.size); +- for (int _i301 = 0; _i301 < _list300.size; ++_i301) +- { +- Column _elem302; +- _elem302 = new Column(); +- _elem302.read(iprot); +- struct.expected.add(_elem302); +- } +- iprot.readListEnd(); +- } +- struct.setExpectedIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // UPDATES +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list303 = iprot.readListBegin(); +- struct.updates = new ArrayList(_list303.size); +- for (int _i304 = 0; _i304 < _list303.size; ++_i304) +- { +- Column _elem305; +- _elem305 = new Column(); +- _elem305.read(iprot); +- struct.updates.add(_elem305); +- } +- iprot.readListEnd(); +- } +- struct.setUpdatesIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 5: // SERIAL_CONSISTENCY_LEVEL +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.serial_consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setSerial_consistency_levelIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 6: // COMMIT_CONSISTENCY_LEVEL +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.commit_consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setCommit_consistency_levelIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- 
iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, cas_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.key != null) { +- oprot.writeFieldBegin(KEY_FIELD_DESC); +- oprot.writeBinary(struct.key); +- oprot.writeFieldEnd(); +- } +- if (struct.column_family != null) { +- oprot.writeFieldBegin(COLUMN_FAMILY_FIELD_DESC); +- oprot.writeString(struct.column_family); +- oprot.writeFieldEnd(); +- } +- if (struct.expected != null) { +- oprot.writeFieldBegin(EXPECTED_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.expected.size())); +- for (Column _iter306 : struct.expected) +- { +- _iter306.write(oprot); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- if (struct.updates != null) { +- oprot.writeFieldBegin(UPDATES_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.updates.size())); +- for (Column _iter307 : struct.updates) +- { +- _iter307.write(oprot); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- if (struct.serial_consistency_level != null) { +- oprot.writeFieldBegin(SERIAL_CONSISTENCY_LEVEL_FIELD_DESC); +- oprot.writeI32(struct.serial_consistency_level.getValue()); +- oprot.writeFieldEnd(); +- } +- if (struct.commit_consistency_level != null) { +- oprot.writeFieldBegin(COMMIT_CONSISTENCY_LEVEL_FIELD_DESC); +- oprot.writeI32(struct.commit_consistency_level.getValue()); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class cas_argsTupleSchemeFactory implements SchemeFactory { +- public cas_argsTupleScheme getScheme() { +- return new cas_argsTupleScheme(); +- } +- } +- 
+- private static class cas_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, cas_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeBinary(struct.key); +- oprot.writeString(struct.column_family); +- oprot.writeI32(struct.serial_consistency_level.getValue()); +- oprot.writeI32(struct.commit_consistency_level.getValue()); +- BitSet optionals = new BitSet(); +- if (struct.isSetExpected()) { +- optionals.set(0); +- } +- if (struct.isSetUpdates()) { +- optionals.set(1); +- } +- oprot.writeBitSet(optionals, 2); +- if (struct.isSetExpected()) { +- { +- oprot.writeI32(struct.expected.size()); +- for (Column _iter308 : struct.expected) +- { +- _iter308.write(oprot); +- } +- } +- } +- if (struct.isSetUpdates()) { +- { +- oprot.writeI32(struct.updates.size()); +- for (Column _iter309 : struct.updates) +- { +- _iter309.write(oprot); +- } +- } +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, cas_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.key = iprot.readBinary(); +- struct.setKeyIsSet(true); +- struct.column_family = iprot.readString(); +- struct.setColumn_familyIsSet(true); +- struct.serial_consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setSerial_consistency_levelIsSet(true); +- struct.commit_consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setCommit_consistency_levelIsSet(true); +- BitSet incoming = iprot.readBitSet(2); +- if (incoming.get(0)) { +- { +- org.apache.thrift.protocol.TList _list310 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); +- struct.expected = new ArrayList(_list310.size); +- for (int _i311 = 0; _i311 < _list310.size; ++_i311) +- { +- Column _elem312; +- _elem312 = new Column(); +- _elem312.read(iprot); +- 
struct.expected.add(_elem312); +- } +- } +- struct.setExpectedIsSet(true); +- } +- if (incoming.get(1)) { +- { +- org.apache.thrift.protocol.TList _list313 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); +- struct.updates = new ArrayList(_list313.size); +- for (int _i314 = 0; _i314 < _list313.size; ++_i314) +- { +- Column _elem315; +- _elem315 = new Column(); +- _elem315.read(iprot); +- struct.updates.add(_elem315); +- } +- } +- struct.setUpdatesIsSet(true); +- } +- } +- } +- +- } +- +- public static class cas_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("cas_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField UE_FIELD_DESC = new org.apache.thrift.protocol.TField("ue", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField TE_FIELD_DESC = new org.apache.thrift.protocol.TField("te", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new cas_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new cas_resultTupleSchemeFactory()); +- } +- +- public CASResult success; // required +- public InvalidRequestException ire; // required +- public UnavailableException ue; // required +- public TimedOutException te; // required +- +- /** The set of fields this struct contains, along with convenience methods 
for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"), +- UE((short)2, "ue"), +- TE((short)3, "te"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- case 2: // UE +- return UE; +- case 3: // TE +- return TE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CASResult.class))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.UE, new org.apache.thrift.meta_data.FieldMetaData("ue", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.TE, new org.apache.thrift.meta_data.FieldMetaData("te", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(cas_result.class, metaDataMap); +- } +- +- public cas_result() { +- } +- +- public cas_result( +- CASResult success, +- InvalidRequestException ire, +- UnavailableException ue, +- TimedOutException te) +- { +- this(); +- this.success = success; +- this.ire = ire; +- 
this.ue = ue; +- this.te = te; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public cas_result(cas_result other) { +- if (other.isSetSuccess()) { +- this.success = new CASResult(other.success); +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- if (other.isSetUe()) { +- this.ue = new UnavailableException(other.ue); +- } +- if (other.isSetTe()) { +- this.te = new TimedOutException(other.te); +- } +- } +- +- public cas_result deepCopy() { +- return new cas_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- this.ue = null; +- this.te = null; +- } +- +- public CASResult getSuccess() { +- return this.success; +- } +- +- public cas_result setSuccess(CASResult success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public cas_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public UnavailableException getUe() { +- return this.ue; +- } +- +- public cas_result setUe(UnavailableException ue) { +- this.ue = ue; +- return this; +- } +- +- public void unsetUe() { +- this.ue = null; +- } +- +- /** Returns true if field ue is set (has been assigned a value) and false otherwise */ +- public boolean isSetUe() { +- 
return this.ue != null; +- } +- +- public void setUeIsSet(boolean value) { +- if (!value) { +- this.ue = null; +- } +- } +- +- public TimedOutException getTe() { +- return this.te; +- } +- +- public cas_result setTe(TimedOutException te) { +- this.te = te; +- return this; +- } +- +- public void unsetTe() { +- this.te = null; +- } +- +- /** Returns true if field te is set (has been assigned a value) and false otherwise */ +- public boolean isSetTe() { +- return this.te != null; +- } +- +- public void setTeIsSet(boolean value) { +- if (!value) { +- this.te = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((CASResult)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- case UE: +- if (value == null) { +- unsetUe(); +- } else { +- setUe((UnavailableException)value); +- } +- break; +- +- case TE: +- if (value == null) { +- unsetTe(); +- } else { +- setTe((TimedOutException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- case UE: +- return getUe(); +- +- case TE: +- return getTe(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- case UE: +- return isSetUe(); +- case TE: +- return isSetTe(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof cas_result) +- 
return this.equals((cas_result)that); +- return false; +- } +- +- public boolean equals(cas_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- boolean this_present_ue = true && this.isSetUe(); +- boolean that_present_ue = true && that.isSetUe(); +- if (this_present_ue || that_present_ue) { +- if (!(this_present_ue && that_present_ue)) +- return false; +- if (!this.ue.equals(that.ue)) +- return false; +- } +- +- boolean this_present_te = true && this.isSetTe(); +- boolean that_present_te = true && that.isSetTe(); +- if (this_present_te || that_present_te) { +- if (!(this_present_te && that_present_te)) +- return false; +- if (!this.te.equals(that.te)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- boolean present_ue = true && (isSetUe()); +- builder.append(present_ue); +- if (present_ue) +- builder.append(ue); +- +- boolean present_te = true && (isSetTe()); +- builder.append(present_te); +- if (present_te) +- builder.append(te); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int 
compareTo(cas_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetUe()).compareTo(other.isSetUe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetUe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ue, other.ue); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetTe()).compareTo(other.isSetTe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetTe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.te, other.te); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("cas_result("); +- 
boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ue:"); +- if (this.ue == null) { +- sb.append("null"); +- } else { +- sb.append(this.ue); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("te:"); +- if (this.te == null) { +- sb.append("null"); +- } else { +- sb.append(this.te); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- if (success != null) { +- success.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class cas_resultStandardSchemeFactory implements SchemeFactory { +- public cas_resultStandardScheme getScheme() { +- return new cas_resultStandardScheme(); +- } +- } +- +- private static class cas_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, cas_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); 
+- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.success = new CASResult(); +- struct.success.read(iprot); +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // UE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // TE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, cas_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- struct.success.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- 
oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ue != null) { +- oprot.writeFieldBegin(UE_FIELD_DESC); +- struct.ue.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.te != null) { +- oprot.writeFieldBegin(TE_FIELD_DESC); +- struct.te.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class cas_resultTupleSchemeFactory implements SchemeFactory { +- public cas_resultTupleScheme getScheme() { +- return new cas_resultTupleScheme(); +- } +- } +- +- private static class cas_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, cas_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetIre()) { +- optionals.set(1); +- } +- if (struct.isSetUe()) { +- optionals.set(2); +- } +- if (struct.isSetTe()) { +- optionals.set(3); +- } +- oprot.writeBitSet(optionals, 4); +- if (struct.isSetSuccess()) { +- struct.success.write(oprot); +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- if (struct.isSetUe()) { +- struct.ue.write(oprot); +- } +- if (struct.isSetTe()) { +- struct.te.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, cas_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(4); +- if (incoming.get(0)) { +- struct.success = new CASResult(); +- struct.success.read(iprot); +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- if (incoming.get(2)) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- 
struct.setUeIsSet(true); +- } +- if (incoming.get(3)) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } +- } +- } +- +- } +- +- public static class remove_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("remove_args"); +- +- private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField COLUMN_PATH_FIELD_DESC = new org.apache.thrift.protocol.TField("column_path", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short)3); +- private static final org.apache.thrift.protocol.TField CONSISTENCY_LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("consistency_level", org.apache.thrift.protocol.TType.I32, (short)4); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new remove_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new remove_argsTupleSchemeFactory()); +- } +- +- public ByteBuffer key; // required +- public ColumnPath column_path; // required +- public long timestamp; // required +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel consistency_level; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- KEY((short)1, "key"), +- COLUMN_PATH((short)2, "column_path"), +- TIMESTAMP((short)3, "timestamp"), +- /** +- * +- * @see ConsistencyLevel +- */ +- CONSISTENCY_LEVEL((short)4, "consistency_level"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // KEY +- return KEY; +- case 2: // COLUMN_PATH +- return COLUMN_PATH; +- case 3: // TIMESTAMP +- return TIMESTAMP; +- case 4: // CONSISTENCY_LEVEL +- return CONSISTENCY_LEVEL; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private static final int __TIMESTAMP_ISSET_ID = 0; +- private byte __isset_bitfield = 0; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.KEY, new org.apache.thrift.meta_data.FieldMetaData("key", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.COLUMN_PATH, new org.apache.thrift.meta_data.FieldMetaData("column_path", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnPath.class))); +- tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); +- tmpMap.put(_Fields.CONSISTENCY_LEVEL, new org.apache.thrift.meta_data.FieldMetaData("consistency_level", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ConsistencyLevel.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(remove_args.class, metaDataMap); +- } +- +- public remove_args() { +- this.consistency_level = 
org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- public remove_args( +- ByteBuffer key, +- ColumnPath column_path, +- long timestamp, +- ConsistencyLevel consistency_level) +- { +- this(); +- this.key = key; +- this.column_path = column_path; +- this.timestamp = timestamp; +- setTimestampIsSet(true); +- this.consistency_level = consistency_level; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public remove_args(remove_args other) { +- __isset_bitfield = other.__isset_bitfield; +- if (other.isSetKey()) { +- this.key = org.apache.thrift.TBaseHelper.copyBinary(other.key); +-; +- } +- if (other.isSetColumn_path()) { +- this.column_path = new ColumnPath(other.column_path); +- } +- this.timestamp = other.timestamp; +- if (other.isSetConsistency_level()) { +- this.consistency_level = other.consistency_level; +- } +- } +- +- public remove_args deepCopy() { +- return new remove_args(this); +- } +- +- @Override +- public void clear() { +- this.key = null; +- this.column_path = null; +- setTimestampIsSet(false); +- this.timestamp = 0; +- this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- public byte[] getKey() { +- setKey(org.apache.thrift.TBaseHelper.rightSize(key)); +- return key == null ? null : key.array(); +- } +- +- public ByteBuffer bufferForKey() { +- return key; +- } +- +- public remove_args setKey(byte[] key) { +- setKey(key == null ? 
(ByteBuffer)null : ByteBuffer.wrap(key)); +- return this; +- } +- +- public remove_args setKey(ByteBuffer key) { +- this.key = key; +- return this; +- } +- +- public void unsetKey() { +- this.key = null; +- } +- +- /** Returns true if field key is set (has been assigned a value) and false otherwise */ +- public boolean isSetKey() { +- return this.key != null; +- } +- +- public void setKeyIsSet(boolean value) { +- if (!value) { +- this.key = null; +- } +- } +- +- public ColumnPath getColumn_path() { +- return this.column_path; +- } +- +- public remove_args setColumn_path(ColumnPath column_path) { +- this.column_path = column_path; +- return this; +- } +- +- public void unsetColumn_path() { +- this.column_path = null; +- } +- +- /** Returns true if field column_path is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumn_path() { +- return this.column_path != null; +- } +- +- public void setColumn_pathIsSet(boolean value) { +- if (!value) { +- this.column_path = null; +- } +- } +- +- public long getTimestamp() { +- return this.timestamp; +- } +- +- public remove_args setTimestamp(long timestamp) { +- this.timestamp = timestamp; +- setTimestampIsSet(true); +- return this; +- } +- +- public void unsetTimestamp() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID); +- } +- +- /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */ +- public boolean isSetTimestamp() { +- return EncodingUtils.testBit(__isset_bitfield, __TIMESTAMP_ISSET_ID); +- } +- +- public void setTimestampIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value); +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel getConsistency_level() { +- return this.consistency_level; +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public remove_args setConsistency_level(ConsistencyLevel consistency_level) { +- 
this.consistency_level = consistency_level; +- return this; +- } +- +- public void unsetConsistency_level() { +- this.consistency_level = null; +- } +- +- /** Returns true if field consistency_level is set (has been assigned a value) and false otherwise */ +- public boolean isSetConsistency_level() { +- return this.consistency_level != null; +- } +- +- public void setConsistency_levelIsSet(boolean value) { +- if (!value) { +- this.consistency_level = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case KEY: +- if (value == null) { +- unsetKey(); +- } else { +- setKey((ByteBuffer)value); +- } +- break; +- +- case COLUMN_PATH: +- if (value == null) { +- unsetColumn_path(); +- } else { +- setColumn_path((ColumnPath)value); +- } +- break; +- +- case TIMESTAMP: +- if (value == null) { +- unsetTimestamp(); +- } else { +- setTimestamp((Long)value); +- } +- break; +- +- case CONSISTENCY_LEVEL: +- if (value == null) { +- unsetConsistency_level(); +- } else { +- setConsistency_level((ConsistencyLevel)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case KEY: +- return getKey(); +- +- case COLUMN_PATH: +- return getColumn_path(); +- +- case TIMESTAMP: +- return Long.valueOf(getTimestamp()); +- +- case CONSISTENCY_LEVEL: +- return getConsistency_level(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case KEY: +- return isSetKey(); +- case COLUMN_PATH: +- return isSetColumn_path(); +- case TIMESTAMP: +- return isSetTimestamp(); +- case CONSISTENCY_LEVEL: +- return isSetConsistency_level(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) 
+- return false; +- if (that instanceof remove_args) +- return this.equals((remove_args)that); +- return false; +- } +- +- public boolean equals(remove_args that) { +- if (that == null) +- return false; +- +- boolean this_present_key = true && this.isSetKey(); +- boolean that_present_key = true && that.isSetKey(); +- if (this_present_key || that_present_key) { +- if (!(this_present_key && that_present_key)) +- return false; +- if (!this.key.equals(that.key)) +- return false; +- } +- +- boolean this_present_column_path = true && this.isSetColumn_path(); +- boolean that_present_column_path = true && that.isSetColumn_path(); +- if (this_present_column_path || that_present_column_path) { +- if (!(this_present_column_path && that_present_column_path)) +- return false; +- if (!this.column_path.equals(that.column_path)) +- return false; +- } +- +- boolean this_present_timestamp = true; +- boolean that_present_timestamp = true; +- if (this_present_timestamp || that_present_timestamp) { +- if (!(this_present_timestamp && that_present_timestamp)) +- return false; +- if (this.timestamp != that.timestamp) +- return false; +- } +- +- boolean this_present_consistency_level = true && this.isSetConsistency_level(); +- boolean that_present_consistency_level = true && that.isSetConsistency_level(); +- if (this_present_consistency_level || that_present_consistency_level) { +- if (!(this_present_consistency_level && that_present_consistency_level)) +- return false; +- if (!this.consistency_level.equals(that.consistency_level)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_key = true && (isSetKey()); +- builder.append(present_key); +- if (present_key) +- builder.append(key); +- +- boolean present_column_path = true && (isSetColumn_path()); +- builder.append(present_column_path); +- if (present_column_path) +- builder.append(column_path); +- +- boolean present_timestamp = 
true; +- builder.append(present_timestamp); +- if (present_timestamp) +- builder.append(timestamp); +- +- boolean present_consistency_level = true && (isSetConsistency_level()); +- builder.append(present_consistency_level); +- if (present_consistency_level) +- builder.append(consistency_level.getValue()); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(remove_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetKey()).compareTo(other.isSetKey()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKey()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key, other.key); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetColumn_path()).compareTo(other.isSetColumn_path()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumn_path()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_path, other.column_path); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetTimestamp()).compareTo(other.isSetTimestamp()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetTimestamp()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.timestamp, other.timestamp); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetConsistency_level()).compareTo(other.isSetConsistency_level()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetConsistency_level()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.consistency_level, other.consistency_level); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- 
return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("remove_args("); +- boolean first = true; +- +- sb.append("key:"); +- if (this.key == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.key, sb); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("column_path:"); +- if (this.column_path == null) { +- sb.append("null"); +- } else { +- sb.append(this.column_path); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("timestamp:"); +- sb.append(this.timestamp); +- first = false; +- if (!first) sb.append(", "); +- sb.append("consistency_level:"); +- if (this.consistency_level == null) { +- sb.append("null"); +- } else { +- sb.append(this.consistency_level); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (key == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'key' was not present! Struct: " + toString()); +- } +- if (column_path == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'column_path' was not present! Struct: " + toString()); +- } +- // alas, we cannot check 'timestamp' because it's a primitive and you chose the non-beans generator. 
+- // check for sub-struct validity +- if (column_path != null) { +- column_path.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. +- __isset_bitfield = 0; +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class remove_argsStandardSchemeFactory implements SchemeFactory { +- public remove_argsStandardScheme getScheme() { +- return new remove_argsStandardScheme(); +- } +- } +- +- private static class remove_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, remove_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // KEY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.key = iprot.readBinary(); +- struct.setKeyIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // COLUMN_PATH +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.column_path = new ColumnPath(); +- struct.column_path.read(iprot); +- 
struct.setColumn_pathIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // TIMESTAMP +- if (schemeField.type == org.apache.thrift.protocol.TType.I64) { +- struct.timestamp = iprot.readI64(); +- struct.setTimestampIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // CONSISTENCY_LEVEL +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- if (!struct.isSetTimestamp()) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'timestamp' was not found in serialized data! 
Struct: " + toString()); +- } +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, remove_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.key != null) { +- oprot.writeFieldBegin(KEY_FIELD_DESC); +- oprot.writeBinary(struct.key); +- oprot.writeFieldEnd(); +- } +- if (struct.column_path != null) { +- oprot.writeFieldBegin(COLUMN_PATH_FIELD_DESC); +- struct.column_path.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldBegin(TIMESTAMP_FIELD_DESC); +- oprot.writeI64(struct.timestamp); +- oprot.writeFieldEnd(); +- if (struct.consistency_level != null) { +- oprot.writeFieldBegin(CONSISTENCY_LEVEL_FIELD_DESC); +- oprot.writeI32(struct.consistency_level.getValue()); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class remove_argsTupleSchemeFactory implements SchemeFactory { +- public remove_argsTupleScheme getScheme() { +- return new remove_argsTupleScheme(); +- } +- } +- +- private static class remove_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, remove_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeBinary(struct.key); +- struct.column_path.write(oprot); +- oprot.writeI64(struct.timestamp); +- BitSet optionals = new BitSet(); +- if (struct.isSetConsistency_level()) { +- optionals.set(0); +- } +- oprot.writeBitSet(optionals, 1); +- if (struct.isSetConsistency_level()) { +- oprot.writeI32(struct.consistency_level.getValue()); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, remove_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.key = iprot.readBinary(); +- struct.setKeyIsSet(true); +- struct.column_path = new ColumnPath(); +- 
struct.column_path.read(iprot); +- struct.setColumn_pathIsSet(true); +- struct.timestamp = iprot.readI64(); +- struct.setTimestampIsSet(true); +- BitSet incoming = iprot.readBitSet(1); +- if (incoming.get(0)) { +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } +- } +- } +- +- } +- +- public static class remove_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("remove_result"); +- +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField UE_FIELD_DESC = new org.apache.thrift.protocol.TField("ue", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField TE_FIELD_DESC = new org.apache.thrift.protocol.TField("te", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new remove_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new remove_resultTupleSchemeFactory()); +- } +- +- public InvalidRequestException ire; // required +- public UnavailableException ue; // required +- public TimedOutException te; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- IRE((short)1, "ire"), +- UE((short)2, "ue"), +- TE((short)3, "te"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // IRE +- return IRE; +- case 2: // UE +- return UE; +- case 3: // TE +- return TE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.UE, new org.apache.thrift.meta_data.FieldMetaData("ue", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.TE, new org.apache.thrift.meta_data.FieldMetaData("te", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(remove_result.class, metaDataMap); +- } +- +- public remove_result() { +- } +- +- public remove_result( +- InvalidRequestException ire, +- UnavailableException ue, +- TimedOutException te) +- { +- this(); +- this.ire = ire; +- this.ue = ue; +- this.te = te; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public remove_result(remove_result other) { +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- if (other.isSetUe()) { +- this.ue = new UnavailableException(other.ue); +- } +- if (other.isSetTe()) { +- this.te = new TimedOutException(other.te); +- } +- } +- +- public remove_result deepCopy() { +- return new remove_result(this); +- } +- +- @Override +- public void clear() { +- this.ire = null; +- this.ue = null; +- this.te = null; +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public remove_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public UnavailableException getUe() { +- return this.ue; +- } +- +- public remove_result setUe(UnavailableException ue) { +- this.ue = ue; +- return this; +- } +- +- public void unsetUe() { +- this.ue = null; +- } +- +- /** Returns true if field ue is set (has been assigned a value) and false otherwise */ +- public boolean isSetUe() { +- return this.ue != null; +- } +- +- public void setUeIsSet(boolean value) { +- if (!value) { +- this.ue = null; +- } +- } +- +- public TimedOutException getTe() { +- return this.te; +- } +- +- public remove_result setTe(TimedOutException te) { +- this.te = te; +- return this; +- } +- +- public void unsetTe() { +- this.te = null; +- } +- +- /** Returns true if field te is set (has been assigned a value) and false otherwise */ +- public boolean isSetTe() { +- return this.te != null; +- } +- +- public void setTeIsSet(boolean value) { +- if (!value) { +- this.te = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case IRE: +- 
if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- case UE: +- if (value == null) { +- unsetUe(); +- } else { +- setUe((UnavailableException)value); +- } +- break; +- +- case TE: +- if (value == null) { +- unsetTe(); +- } else { +- setTe((TimedOutException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case IRE: +- return getIre(); +- +- case UE: +- return getUe(); +- +- case TE: +- return getTe(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case IRE: +- return isSetIre(); +- case UE: +- return isSetUe(); +- case TE: +- return isSetTe(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof remove_result) +- return this.equals((remove_result)that); +- return false; +- } +- +- public boolean equals(remove_result that) { +- if (that == null) +- return false; +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- boolean this_present_ue = true && this.isSetUe(); +- boolean that_present_ue = true && that.isSetUe(); +- if (this_present_ue || that_present_ue) { +- if (!(this_present_ue && that_present_ue)) +- return false; +- if (!this.ue.equals(that.ue)) +- return false; +- } +- +- boolean this_present_te = true && this.isSetTe(); +- boolean that_present_te = true && that.isSetTe(); +- if (this_present_te || that_present_te) { +- if (!(this_present_te && 
that_present_te)) +- return false; +- if (!this.te.equals(that.te)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- boolean present_ue = true && (isSetUe()); +- builder.append(present_ue); +- if (present_ue) +- builder.append(ue); +- +- boolean present_te = true && (isSetTe()); +- builder.append(present_te); +- if (present_te) +- builder.append(te); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(remove_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetUe()).compareTo(other.isSetUe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetUe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ue, other.ue); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetTe()).compareTo(other.isSetTe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetTe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.te, other.te); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- 
schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("remove_result("); +- boolean first = true; +- +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ue:"); +- if (this.ue == null) { +- sb.append("null"); +- } else { +- sb.append(this.ue); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("te:"); +- if (this.te == null) { +- sb.append("null"); +- } else { +- sb.append(this.te); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class remove_resultStandardSchemeFactory implements SchemeFactory { +- public remove_resultStandardScheme getScheme() { +- return new remove_resultStandardScheme(); +- } +- } +- +- private static class remove_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, 
remove_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // UE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // TE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, remove_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ue != null) { +- oprot.writeFieldBegin(UE_FIELD_DESC); +- struct.ue.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.te != null) { +- oprot.writeFieldBegin(TE_FIELD_DESC); +- struct.te.write(oprot); +- 
oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class remove_resultTupleSchemeFactory implements SchemeFactory { +- public remove_resultTupleScheme getScheme() { +- return new remove_resultTupleScheme(); +- } +- } +- +- private static class remove_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, remove_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetIre()) { +- optionals.set(0); +- } +- if (struct.isSetUe()) { +- optionals.set(1); +- } +- if (struct.isSetTe()) { +- optionals.set(2); +- } +- oprot.writeBitSet(optionals, 3); +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- if (struct.isSetUe()) { +- struct.ue.write(oprot); +- } +- if (struct.isSetTe()) { +- struct.te.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, remove_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(3); +- if (incoming.get(0)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } +- if (incoming.get(2)) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } +- } +- } +- +- } +- +- public static class remove_counter_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("remove_counter_args"); +- +- private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", 
org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField PATH_FIELD_DESC = new org.apache.thrift.protocol.TField("path", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField CONSISTENCY_LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("consistency_level", org.apache.thrift.protocol.TType.I32, (short)3); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new remove_counter_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new remove_counter_argsTupleSchemeFactory()); +- } +- +- public ByteBuffer key; // required +- public ColumnPath path; // required +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel consistency_level; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- KEY((short)1, "key"), +- PATH((short)2, "path"), +- /** +- * +- * @see ConsistencyLevel +- */ +- CONSISTENCY_LEVEL((short)3, "consistency_level"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // KEY +- return KEY; +- case 2: // PATH +- return PATH; +- case 3: // CONSISTENCY_LEVEL +- return CONSISTENCY_LEVEL; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. 
+- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.KEY, new org.apache.thrift.meta_data.FieldMetaData("key", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.PATH, new org.apache.thrift.meta_data.FieldMetaData("path", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnPath.class))); +- tmpMap.put(_Fields.CONSISTENCY_LEVEL, new org.apache.thrift.meta_data.FieldMetaData("consistency_level", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ConsistencyLevel.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(remove_counter_args.class, metaDataMap); +- } +- +- public remove_counter_args() { +- this.consistency_level = 
org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- public remove_counter_args( +- ByteBuffer key, +- ColumnPath path, +- ConsistencyLevel consistency_level) +- { +- this(); +- this.key = key; +- this.path = path; +- this.consistency_level = consistency_level; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public remove_counter_args(remove_counter_args other) { +- if (other.isSetKey()) { +- this.key = org.apache.thrift.TBaseHelper.copyBinary(other.key); +-; +- } +- if (other.isSetPath()) { +- this.path = new ColumnPath(other.path); +- } +- if (other.isSetConsistency_level()) { +- this.consistency_level = other.consistency_level; +- } +- } +- +- public remove_counter_args deepCopy() { +- return new remove_counter_args(this); +- } +- +- @Override +- public void clear() { +- this.key = null; +- this.path = null; +- this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- public byte[] getKey() { +- setKey(org.apache.thrift.TBaseHelper.rightSize(key)); +- return key == null ? null : key.array(); +- } +- +- public ByteBuffer bufferForKey() { +- return key; +- } +- +- public remove_counter_args setKey(byte[] key) { +- setKey(key == null ? 
(ByteBuffer)null : ByteBuffer.wrap(key)); +- return this; +- } +- +- public remove_counter_args setKey(ByteBuffer key) { +- this.key = key; +- return this; +- } +- +- public void unsetKey() { +- this.key = null; +- } +- +- /** Returns true if field key is set (has been assigned a value) and false otherwise */ +- public boolean isSetKey() { +- return this.key != null; +- } +- +- public void setKeyIsSet(boolean value) { +- if (!value) { +- this.key = null; +- } +- } +- +- public ColumnPath getPath() { +- return this.path; +- } +- +- public remove_counter_args setPath(ColumnPath path) { +- this.path = path; +- return this; +- } +- +- public void unsetPath() { +- this.path = null; +- } +- +- /** Returns true if field path is set (has been assigned a value) and false otherwise */ +- public boolean isSetPath() { +- return this.path != null; +- } +- +- public void setPathIsSet(boolean value) { +- if (!value) { +- this.path = null; +- } +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel getConsistency_level() { +- return this.consistency_level; +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public remove_counter_args setConsistency_level(ConsistencyLevel consistency_level) { +- this.consistency_level = consistency_level; +- return this; +- } +- +- public void unsetConsistency_level() { +- this.consistency_level = null; +- } +- +- /** Returns true if field consistency_level is set (has been assigned a value) and false otherwise */ +- public boolean isSetConsistency_level() { +- return this.consistency_level != null; +- } +- +- public void setConsistency_levelIsSet(boolean value) { +- if (!value) { +- this.consistency_level = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case KEY: +- if (value == null) { +- unsetKey(); +- } else { +- setKey((ByteBuffer)value); +- } +- break; +- +- case PATH: +- if (value == null) { +- unsetPath(); +- } else { +- setPath((ColumnPath)value); +- } +- 
break; +- +- case CONSISTENCY_LEVEL: +- if (value == null) { +- unsetConsistency_level(); +- } else { +- setConsistency_level((ConsistencyLevel)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case KEY: +- return getKey(); +- +- case PATH: +- return getPath(); +- +- case CONSISTENCY_LEVEL: +- return getConsistency_level(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case KEY: +- return isSetKey(); +- case PATH: +- return isSetPath(); +- case CONSISTENCY_LEVEL: +- return isSetConsistency_level(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof remove_counter_args) +- return this.equals((remove_counter_args)that); +- return false; +- } +- +- public boolean equals(remove_counter_args that) { +- if (that == null) +- return false; +- +- boolean this_present_key = true && this.isSetKey(); +- boolean that_present_key = true && that.isSetKey(); +- if (this_present_key || that_present_key) { +- if (!(this_present_key && that_present_key)) +- return false; +- if (!this.key.equals(that.key)) +- return false; +- } +- +- boolean this_present_path = true && this.isSetPath(); +- boolean that_present_path = true && that.isSetPath(); +- if (this_present_path || that_present_path) { +- if (!(this_present_path && that_present_path)) +- return false; +- if (!this.path.equals(that.path)) +- return false; +- } +- +- boolean this_present_consistency_level = true && this.isSetConsistency_level(); +- boolean that_present_consistency_level = true && that.isSetConsistency_level(); +- if (this_present_consistency_level || that_present_consistency_level) { +- if 
(!(this_present_consistency_level && that_present_consistency_level)) +- return false; +- if (!this.consistency_level.equals(that.consistency_level)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_key = true && (isSetKey()); +- builder.append(present_key); +- if (present_key) +- builder.append(key); +- +- boolean present_path = true && (isSetPath()); +- builder.append(present_path); +- if (present_path) +- builder.append(path); +- +- boolean present_consistency_level = true && (isSetConsistency_level()); +- builder.append(present_consistency_level); +- if (present_consistency_level) +- builder.append(consistency_level.getValue()); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(remove_counter_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetKey()).compareTo(other.isSetKey()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKey()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key, other.key); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetPath()).compareTo(other.isSetPath()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetPath()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.path, other.path); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetConsistency_level()).compareTo(other.isSetConsistency_level()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetConsistency_level()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.consistency_level, other.consistency_level); +- if (lastComparison != 0) { +- return 
lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("remove_counter_args("); +- boolean first = true; +- +- sb.append("key:"); +- if (this.key == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.key, sb); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("path:"); +- if (this.path == null) { +- sb.append("null"); +- } else { +- sb.append(this.path); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("consistency_level:"); +- if (this.consistency_level == null) { +- sb.append("null"); +- } else { +- sb.append(this.consistency_level); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (key == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'key' was not present! Struct: " + toString()); +- } +- if (path == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'path' was not present! Struct: " + toString()); +- } +- if (consistency_level == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'consistency_level' was not present! 
Struct: " + toString()); +- } +- // check for sub-struct validity +- if (path != null) { +- path.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class remove_counter_argsStandardSchemeFactory implements SchemeFactory { +- public remove_counter_argsStandardScheme getScheme() { +- return new remove_counter_argsStandardScheme(); +- } +- } +- +- private static class remove_counter_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, remove_counter_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // KEY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.key = iprot.readBinary(); +- struct.setKeyIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // PATH +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.path = new ColumnPath(); +- struct.path.read(iprot); +- struct.setPathIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // 
CONSISTENCY_LEVEL +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, remove_counter_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.key != null) { +- oprot.writeFieldBegin(KEY_FIELD_DESC); +- oprot.writeBinary(struct.key); +- oprot.writeFieldEnd(); +- } +- if (struct.path != null) { +- oprot.writeFieldBegin(PATH_FIELD_DESC); +- struct.path.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.consistency_level != null) { +- oprot.writeFieldBegin(CONSISTENCY_LEVEL_FIELD_DESC); +- oprot.writeI32(struct.consistency_level.getValue()); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class remove_counter_argsTupleSchemeFactory implements SchemeFactory { +- public remove_counter_argsTupleScheme getScheme() { +- return new remove_counter_argsTupleScheme(); +- } +- } +- +- private static class remove_counter_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, remove_counter_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeBinary(struct.key); +- struct.path.write(oprot); +- oprot.writeI32(struct.consistency_level.getValue()); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, 
remove_counter_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.key = iprot.readBinary(); +- struct.setKeyIsSet(true); +- struct.path = new ColumnPath(); +- struct.path.read(iprot); +- struct.setPathIsSet(true); +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } +- } +- +- } +- +- public static class remove_counter_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("remove_counter_result"); +- +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField UE_FIELD_DESC = new org.apache.thrift.protocol.TField("ue", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField TE_FIELD_DESC = new org.apache.thrift.protocol.TField("te", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new remove_counter_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new remove_counter_resultTupleSchemeFactory()); +- } +- +- public InvalidRequestException ire; // required +- public UnavailableException ue; // required +- public TimedOutException te; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- IRE((short)1, "ire"), +- UE((short)2, "ue"), +- TE((short)3, "te"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // IRE +- return IRE; +- case 2: // UE +- return UE; +- case 3: // TE +- return TE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.UE, new org.apache.thrift.meta_data.FieldMetaData("ue", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.TE, new org.apache.thrift.meta_data.FieldMetaData("te", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(remove_counter_result.class, metaDataMap); +- } +- +- public remove_counter_result() { +- } +- +- public remove_counter_result( +- InvalidRequestException ire, +- UnavailableException ue, +- TimedOutException te) +- { +- this(); +- this.ire = ire; +- this.ue = ue; +- this.te = te; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public remove_counter_result(remove_counter_result other) { +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- if (other.isSetUe()) { +- this.ue = new UnavailableException(other.ue); +- } +- if (other.isSetTe()) { +- this.te = new TimedOutException(other.te); +- } +- } +- +- public remove_counter_result deepCopy() { +- return new remove_counter_result(this); +- } +- +- @Override +- public void clear() { +- this.ire = null; +- this.ue = null; +- this.te = null; +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public remove_counter_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public UnavailableException getUe() { +- return this.ue; +- } +- +- public remove_counter_result setUe(UnavailableException ue) { +- this.ue = ue; +- return this; +- } +- +- public void unsetUe() { +- this.ue = null; +- } +- +- /** Returns true if field ue is set (has been assigned a value) and false otherwise */ +- public boolean isSetUe() { +- return this.ue != null; +- } +- +- public void setUeIsSet(boolean value) { +- if (!value) { +- this.ue = null; +- } +- } +- +- public TimedOutException getTe() { +- return this.te; +- } +- +- public remove_counter_result setTe(TimedOutException te) { +- this.te = te; +- return this; +- } +- +- public void unsetTe() { +- this.te = null; +- } +- +- /** Returns true if field te is set (has been assigned a value) and false otherwise */ +- public boolean isSetTe() { +- return this.te != null; +- } +- +- public void setTeIsSet(boolean value) { +- if (!value) { +- this.te = null; +- } +- } +- +- public void setFieldValue(_Fields 
field, Object value) { +- switch (field) { +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- case UE: +- if (value == null) { +- unsetUe(); +- } else { +- setUe((UnavailableException)value); +- } +- break; +- +- case TE: +- if (value == null) { +- unsetTe(); +- } else { +- setTe((TimedOutException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case IRE: +- return getIre(); +- +- case UE: +- return getUe(); +- +- case TE: +- return getTe(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case IRE: +- return isSetIre(); +- case UE: +- return isSetUe(); +- case TE: +- return isSetTe(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof remove_counter_result) +- return this.equals((remove_counter_result)that); +- return false; +- } +- +- public boolean equals(remove_counter_result that) { +- if (that == null) +- return false; +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- boolean this_present_ue = true && this.isSetUe(); +- boolean that_present_ue = true && that.isSetUe(); +- if (this_present_ue || that_present_ue) { +- if (!(this_present_ue && that_present_ue)) +- return false; +- if (!this.ue.equals(that.ue)) +- return false; +- } +- +- boolean this_present_te = true && this.isSetTe(); +- boolean that_present_te = true && 
that.isSetTe(); +- if (this_present_te || that_present_te) { +- if (!(this_present_te && that_present_te)) +- return false; +- if (!this.te.equals(that.te)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- boolean present_ue = true && (isSetUe()); +- builder.append(present_ue); +- if (present_ue) +- builder.append(ue); +- +- boolean present_te = true && (isSetTe()); +- builder.append(present_te); +- if (present_te) +- builder.append(te); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(remove_counter_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetUe()).compareTo(other.isSetUe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetUe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ue, other.ue); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetTe()).compareTo(other.isSetTe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetTe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.te, other.te); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void 
read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("remove_counter_result("); +- boolean first = true; +- +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ue:"); +- if (this.ue == null) { +- sb.append("null"); +- } else { +- sb.append(this.ue); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("te:"); +- if (this.te == null) { +- sb.append("null"); +- } else { +- sb.append(this.te); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class remove_counter_resultStandardSchemeFactory implements SchemeFactory { +- public remove_counter_resultStandardScheme getScheme() { +- return new remove_counter_resultStandardScheme(); +- } +- } +- +- private static 
class remove_counter_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, remove_counter_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // UE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // TE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, remove_counter_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ue != null) { +- oprot.writeFieldBegin(UE_FIELD_DESC); +- 
struct.ue.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.te != null) { +- oprot.writeFieldBegin(TE_FIELD_DESC); +- struct.te.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class remove_counter_resultTupleSchemeFactory implements SchemeFactory { +- public remove_counter_resultTupleScheme getScheme() { +- return new remove_counter_resultTupleScheme(); +- } +- } +- +- private static class remove_counter_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, remove_counter_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetIre()) { +- optionals.set(0); +- } +- if (struct.isSetUe()) { +- optionals.set(1); +- } +- if (struct.isSetTe()) { +- optionals.set(2); +- } +- oprot.writeBitSet(optionals, 3); +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- if (struct.isSetUe()) { +- struct.ue.write(oprot); +- } +- if (struct.isSetTe()) { +- struct.te.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, remove_counter_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(3); +- if (incoming.get(0)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } +- if (incoming.get(2)) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } +- } +- } +- +- } +- +- public static class batch_mutate_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = 
new org.apache.thrift.protocol.TStruct("batch_mutate_args"); +- +- private static final org.apache.thrift.protocol.TField MUTATION_MAP_FIELD_DESC = new org.apache.thrift.protocol.TField("mutation_map", org.apache.thrift.protocol.TType.MAP, (short)1); +- private static final org.apache.thrift.protocol.TField CONSISTENCY_LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("consistency_level", org.apache.thrift.protocol.TType.I32, (short)2); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new batch_mutate_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new batch_mutate_argsTupleSchemeFactory()); +- } +- +- public Map>> mutation_map; // required +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel consistency_level; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- MUTATION_MAP((short)1, "mutation_map"), +- /** +- * +- * @see ConsistencyLevel +- */ +- CONSISTENCY_LEVEL((short)2, "consistency_level"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // MUTATION_MAP +- return MUTATION_MAP; +- case 2: // CONSISTENCY_LEVEL +- return CONSISTENCY_LEVEL; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. 
+- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.MUTATION_MAP, new org.apache.thrift.meta_data.FieldMetaData("mutation_map", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true), +- new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Mutation.class)))))); +- tmpMap.put(_Fields.CONSISTENCY_LEVEL, new org.apache.thrift.meta_data.FieldMetaData("consistency_level", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ConsistencyLevel.class))); +- 
metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(batch_mutate_args.class, metaDataMap); +- } +- +- public batch_mutate_args() { +- this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- public batch_mutate_args( +- Map>> mutation_map, +- ConsistencyLevel consistency_level) +- { +- this(); +- this.mutation_map = mutation_map; +- this.consistency_level = consistency_level; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public batch_mutate_args(batch_mutate_args other) { +- if (other.isSetMutation_map()) { +- Map>> __this__mutation_map = new HashMap>>(other.mutation_map.size()); +- for (Map.Entry>> other_element : other.mutation_map.entrySet()) { +- +- ByteBuffer other_element_key = other_element.getKey(); +- Map> other_element_value = other_element.getValue(); +- +- ByteBuffer __this__mutation_map_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key); +-; +- +- Map> __this__mutation_map_copy_value = new HashMap>(other_element_value.size()); +- for (Map.Entry> other_element_value_element : other_element_value.entrySet()) { +- +- String other_element_value_element_key = other_element_value_element.getKey(); +- List other_element_value_element_value = other_element_value_element.getValue(); +- +- String __this__mutation_map_copy_value_copy_key = other_element_value_element_key; +- +- List __this__mutation_map_copy_value_copy_value = new ArrayList(other_element_value_element_value.size()); +- for (Mutation other_element_value_element_value_element : other_element_value_element_value) { +- __this__mutation_map_copy_value_copy_value.add(new Mutation(other_element_value_element_value_element)); +- } +- +- __this__mutation_map_copy_value.put(__this__mutation_map_copy_value_copy_key, __this__mutation_map_copy_value_copy_value); +- } +- +- __this__mutation_map.put(__this__mutation_map_copy_key, __this__mutation_map_copy_value); +- } +- 
this.mutation_map = __this__mutation_map; +- } +- if (other.isSetConsistency_level()) { +- this.consistency_level = other.consistency_level; +- } +- } +- +- public batch_mutate_args deepCopy() { +- return new batch_mutate_args(this); +- } +- +- @Override +- public void clear() { +- this.mutation_map = null; +- this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- public int getMutation_mapSize() { +- return (this.mutation_map == null) ? 0 : this.mutation_map.size(); +- } +- +- public void putToMutation_map(ByteBuffer key, Map> val) { +- if (this.mutation_map == null) { +- this.mutation_map = new HashMap>>(); +- } +- this.mutation_map.put(key, val); +- } +- +- public Map>> getMutation_map() { +- return this.mutation_map; +- } +- +- public batch_mutate_args setMutation_map(Map>> mutation_map) { +- this.mutation_map = mutation_map; +- return this; +- } +- +- public void unsetMutation_map() { +- this.mutation_map = null; +- } +- +- /** Returns true if field mutation_map is set (has been assigned a value) and false otherwise */ +- public boolean isSetMutation_map() { +- return this.mutation_map != null; +- } +- +- public void setMutation_mapIsSet(boolean value) { +- if (!value) { +- this.mutation_map = null; +- } +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel getConsistency_level() { +- return this.consistency_level; +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public batch_mutate_args setConsistency_level(ConsistencyLevel consistency_level) { +- this.consistency_level = consistency_level; +- return this; +- } +- +- public void unsetConsistency_level() { +- this.consistency_level = null; +- } +- +- /** Returns true if field consistency_level is set (has been assigned a value) and false otherwise */ +- public boolean isSetConsistency_level() { +- return this.consistency_level != null; +- } +- +- public void setConsistency_levelIsSet(boolean value) { +- if (!value) { +- this.consistency_level = 
null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case MUTATION_MAP: +- if (value == null) { +- unsetMutation_map(); +- } else { +- setMutation_map((Map>>)value); +- } +- break; +- +- case CONSISTENCY_LEVEL: +- if (value == null) { +- unsetConsistency_level(); +- } else { +- setConsistency_level((ConsistencyLevel)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case MUTATION_MAP: +- return getMutation_map(); +- +- case CONSISTENCY_LEVEL: +- return getConsistency_level(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case MUTATION_MAP: +- return isSetMutation_map(); +- case CONSISTENCY_LEVEL: +- return isSetConsistency_level(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof batch_mutate_args) +- return this.equals((batch_mutate_args)that); +- return false; +- } +- +- public boolean equals(batch_mutate_args that) { +- if (that == null) +- return false; +- +- boolean this_present_mutation_map = true && this.isSetMutation_map(); +- boolean that_present_mutation_map = true && that.isSetMutation_map(); +- if (this_present_mutation_map || that_present_mutation_map) { +- if (!(this_present_mutation_map && that_present_mutation_map)) +- return false; +- if (!this.mutation_map.equals(that.mutation_map)) +- return false; +- } +- +- boolean this_present_consistency_level = true && this.isSetConsistency_level(); +- boolean that_present_consistency_level = true && that.isSetConsistency_level(); +- if (this_present_consistency_level || that_present_consistency_level) { +- if 
(!(this_present_consistency_level && that_present_consistency_level)) +- return false; +- if (!this.consistency_level.equals(that.consistency_level)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_mutation_map = true && (isSetMutation_map()); +- builder.append(present_mutation_map); +- if (present_mutation_map) +- builder.append(mutation_map); +- +- boolean present_consistency_level = true && (isSetConsistency_level()); +- builder.append(present_consistency_level); +- if (present_consistency_level) +- builder.append(consistency_level.getValue()); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(batch_mutate_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetMutation_map()).compareTo(other.isSetMutation_map()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetMutation_map()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.mutation_map, other.mutation_map); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetConsistency_level()).compareTo(other.isSetConsistency_level()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetConsistency_level()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.consistency_level, other.consistency_level); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void 
write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("batch_mutate_args("); +- boolean first = true; +- +- sb.append("mutation_map:"); +- if (this.mutation_map == null) { +- sb.append("null"); +- } else { +- sb.append(this.mutation_map); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("consistency_level:"); +- if (this.consistency_level == null) { +- sb.append("null"); +- } else { +- sb.append(this.consistency_level); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (mutation_map == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'mutation_map' was not present! Struct: " + toString()); +- } +- if (consistency_level == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'consistency_level' was not present! 
Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class batch_mutate_argsStandardSchemeFactory implements SchemeFactory { +- public batch_mutate_argsStandardScheme getScheme() { +- return new batch_mutate_argsStandardScheme(); +- } +- } +- +- private static class batch_mutate_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, batch_mutate_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // MUTATION_MAP +- if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { +- { +- org.apache.thrift.protocol.TMap _map316 = iprot.readMapBegin(); +- struct.mutation_map = new HashMap>>(2*_map316.size); +- for (int _i317 = 0; _i317 < _map316.size; ++_i317) +- { +- ByteBuffer _key318; +- Map> _val319; +- _key318 = iprot.readBinary(); +- { +- org.apache.thrift.protocol.TMap _map320 = iprot.readMapBegin(); +- _val319 = new HashMap>(2*_map320.size); +- for (int _i321 = 0; _i321 < _map320.size; ++_i321) +- { +- String _key322; +- List _val323; +- _key322 = iprot.readString(); +- { +- 
org.apache.thrift.protocol.TList _list324 = iprot.readListBegin(); +- _val323 = new ArrayList(_list324.size); +- for (int _i325 = 0; _i325 < _list324.size; ++_i325) +- { +- Mutation _elem326; +- _elem326 = new Mutation(); +- _elem326.read(iprot); +- _val323.add(_elem326); +- } +- iprot.readListEnd(); +- } +- _val319.put(_key322, _val323); +- } +- iprot.readMapEnd(); +- } +- struct.mutation_map.put(_key318, _val319); +- } +- iprot.readMapEnd(); +- } +- struct.setMutation_mapIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // CONSISTENCY_LEVEL +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, batch_mutate_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.mutation_map != null) { +- oprot.writeFieldBegin(MUTATION_MAP_FIELD_DESC); +- { +- oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, struct.mutation_map.size())); +- for (Map.Entry>> _iter327 : struct.mutation_map.entrySet()) +- { +- oprot.writeBinary(_iter327.getKey()); +- { +- oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, _iter327.getValue().size())); +- for (Map.Entry> _iter328 : _iter327.getValue().entrySet()) +- { 
+- oprot.writeString(_iter328.getKey()); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter328.getValue().size())); +- for (Mutation _iter329 : _iter328.getValue()) +- { +- _iter329.write(oprot); +- } +- oprot.writeListEnd(); +- } +- } +- oprot.writeMapEnd(); +- } +- } +- oprot.writeMapEnd(); +- } +- oprot.writeFieldEnd(); +- } +- if (struct.consistency_level != null) { +- oprot.writeFieldBegin(CONSISTENCY_LEVEL_FIELD_DESC); +- oprot.writeI32(struct.consistency_level.getValue()); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class batch_mutate_argsTupleSchemeFactory implements SchemeFactory { +- public batch_mutate_argsTupleScheme getScheme() { +- return new batch_mutate_argsTupleScheme(); +- } +- } +- +- private static class batch_mutate_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, batch_mutate_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- { +- oprot.writeI32(struct.mutation_map.size()); +- for (Map.Entry>> _iter330 : struct.mutation_map.entrySet()) +- { +- oprot.writeBinary(_iter330.getKey()); +- { +- oprot.writeI32(_iter330.getValue().size()); +- for (Map.Entry> _iter331 : _iter330.getValue().entrySet()) +- { +- oprot.writeString(_iter331.getKey()); +- { +- oprot.writeI32(_iter331.getValue().size()); +- for (Mutation _iter332 : _iter331.getValue()) +- { +- _iter332.write(oprot); +- } +- } +- } +- } +- } +- } +- oprot.writeI32(struct.consistency_level.getValue()); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, batch_mutate_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- { +- org.apache.thrift.protocol.TMap _map333 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, 
org.apache.thrift.protocol.TType.MAP, iprot.readI32()); +- struct.mutation_map = new HashMap>>(2*_map333.size); +- for (int _i334 = 0; _i334 < _map333.size; ++_i334) +- { +- ByteBuffer _key335; +- Map> _val336; +- _key335 = iprot.readBinary(); +- { +- org.apache.thrift.protocol.TMap _map337 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32()); +- _val336 = new HashMap>(2*_map337.size); +- for (int _i338 = 0; _i338 < _map337.size; ++_i338) +- { +- String _key339; +- List _val340; +- _key339 = iprot.readString(); +- { +- org.apache.thrift.protocol.TList _list341 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); +- _val340 = new ArrayList(_list341.size); +- for (int _i342 = 0; _i342 < _list341.size; ++_i342) +- { +- Mutation _elem343; +- _elem343 = new Mutation(); +- _elem343.read(iprot); +- _val340.add(_elem343); +- } +- } +- _val336.put(_key339, _val340); +- } +- } +- struct.mutation_map.put(_key335, _val336); +- } +- } +- struct.setMutation_mapIsSet(true); +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } +- } +- +- } +- +- public static class batch_mutate_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("batch_mutate_result"); +- +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField UE_FIELD_DESC = new org.apache.thrift.protocol.TField("ue", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField TE_FIELD_DESC = new org.apache.thrift.protocol.TField("te", 
org.apache.thrift.protocol.TType.STRUCT, (short)3); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new batch_mutate_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new batch_mutate_resultTupleSchemeFactory()); +- } +- +- public InvalidRequestException ire; // required +- public UnavailableException ue; // required +- public TimedOutException te; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- IRE((short)1, "ire"), +- UE((short)2, "ue"), +- TE((short)3, "te"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // IRE +- return IRE; +- case 2: // UE +- return UE; +- case 3: // TE +- return TE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.UE, new org.apache.thrift.meta_data.FieldMetaData("ue", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.TE, new org.apache.thrift.meta_data.FieldMetaData("te", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(batch_mutate_result.class, metaDataMap); +- } +- +- public batch_mutate_result() { +- } +- +- public batch_mutate_result( +- InvalidRequestException ire, +- UnavailableException ue, +- TimedOutException te) +- { +- this(); +- this.ire = ire; +- this.ue = ue; +- this.te = te; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public batch_mutate_result(batch_mutate_result other) { +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- if (other.isSetUe()) { +- this.ue = new UnavailableException(other.ue); +- } +- if (other.isSetTe()) { +- this.te = new TimedOutException(other.te); +- } +- } +- +- public batch_mutate_result deepCopy() { +- return new batch_mutate_result(this); +- } +- +- @Override +- public void clear() { +- this.ire = null; +- this.ue = null; +- this.te = null; +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public batch_mutate_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public UnavailableException getUe() { +- return this.ue; +- } +- +- public batch_mutate_result setUe(UnavailableException ue) { +- this.ue = ue; +- return this; +- } +- +- public void unsetUe() { +- this.ue = null; +- } +- +- /** Returns true if field ue is set (has been assigned a value) and false otherwise */ +- public boolean isSetUe() { +- return this.ue != null; +- } +- +- public void setUeIsSet(boolean value) { +- if (!value) { +- this.ue = null; +- } +- } +- +- public TimedOutException getTe() { +- return this.te; +- } +- +- public batch_mutate_result setTe(TimedOutException te) { +- this.te = te; +- return this; +- } +- +- public void unsetTe() { +- this.te = null; +- } +- +- /** Returns true if field te is set (has been assigned a value) and false otherwise */ +- public boolean isSetTe() { +- return this.te != null; +- } +- +- public void setTeIsSet(boolean value) { +- if (!value) { +- this.te = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object 
value) { +- switch (field) { +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- case UE: +- if (value == null) { +- unsetUe(); +- } else { +- setUe((UnavailableException)value); +- } +- break; +- +- case TE: +- if (value == null) { +- unsetTe(); +- } else { +- setTe((TimedOutException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case IRE: +- return getIre(); +- +- case UE: +- return getUe(); +- +- case TE: +- return getTe(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case IRE: +- return isSetIre(); +- case UE: +- return isSetUe(); +- case TE: +- return isSetTe(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof batch_mutate_result) +- return this.equals((batch_mutate_result)that); +- return false; +- } +- +- public boolean equals(batch_mutate_result that) { +- if (that == null) +- return false; +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- boolean this_present_ue = true && this.isSetUe(); +- boolean that_present_ue = true && that.isSetUe(); +- if (this_present_ue || that_present_ue) { +- if (!(this_present_ue && that_present_ue)) +- return false; +- if (!this.ue.equals(that.ue)) +- return false; +- } +- +- boolean this_present_te = true && this.isSetTe(); +- boolean that_present_te = true && that.isSetTe(); +- if 
(this_present_te || that_present_te) { +- if (!(this_present_te && that_present_te)) +- return false; +- if (!this.te.equals(that.te)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- boolean present_ue = true && (isSetUe()); +- builder.append(present_ue); +- if (present_ue) +- builder.append(ue); +- +- boolean present_te = true && (isSetTe()); +- builder.append(present_te); +- if (present_te) +- builder.append(te); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(batch_mutate_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetUe()).compareTo(other.isSetUe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetUe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ue, other.ue); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetTe()).compareTo(other.isSetTe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetTe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.te, other.te); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) 
throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("batch_mutate_result("); +- boolean first = true; +- +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ue:"); +- if (this.ue == null) { +- sb.append("null"); +- } else { +- sb.append(this.ue); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("te:"); +- if (this.te == null) { +- sb.append("null"); +- } else { +- sb.append(this.te); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class batch_mutate_resultStandardSchemeFactory implements SchemeFactory { +- public batch_mutate_resultStandardScheme getScheme() { +- return new batch_mutate_resultStandardScheme(); +- } +- } +- +- private static class batch_mutate_resultStandardScheme extends 
StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, batch_mutate_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // UE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // TE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, batch_mutate_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ue != null) { +- oprot.writeFieldBegin(UE_FIELD_DESC); +- struct.ue.write(oprot); +- oprot.writeFieldEnd(); +- } +- 
if (struct.te != null) { +- oprot.writeFieldBegin(TE_FIELD_DESC); +- struct.te.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class batch_mutate_resultTupleSchemeFactory implements SchemeFactory { +- public batch_mutate_resultTupleScheme getScheme() { +- return new batch_mutate_resultTupleScheme(); +- } +- } +- +- private static class batch_mutate_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, batch_mutate_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetIre()) { +- optionals.set(0); +- } +- if (struct.isSetUe()) { +- optionals.set(1); +- } +- if (struct.isSetTe()) { +- optionals.set(2); +- } +- oprot.writeBitSet(optionals, 3); +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- if (struct.isSetUe()) { +- struct.ue.write(oprot); +- } +- if (struct.isSetTe()) { +- struct.te.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, batch_mutate_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(3); +- if (incoming.get(0)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } +- if (incoming.get(2)) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } +- } +- } +- +- } +- +- public static class atomic_batch_mutate_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("atomic_batch_mutate_args"); +- +- private static final org.apache.thrift.protocol.TField MUTATION_MAP_FIELD_DESC = new org.apache.thrift.protocol.TField("mutation_map", org.apache.thrift.protocol.TType.MAP, (short)1); +- private static final org.apache.thrift.protocol.TField CONSISTENCY_LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("consistency_level", org.apache.thrift.protocol.TType.I32, (short)2); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new atomic_batch_mutate_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new atomic_batch_mutate_argsTupleSchemeFactory()); +- } +- +- public Map>> mutation_map; // required +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel consistency_level; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- MUTATION_MAP((short)1, "mutation_map"), +- /** +- * +- * @see ConsistencyLevel +- */ +- CONSISTENCY_LEVEL((short)2, "consistency_level"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // MUTATION_MAP +- return MUTATION_MAP; +- case 2: // CONSISTENCY_LEVEL +- return CONSISTENCY_LEVEL; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. 
+- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.MUTATION_MAP, new org.apache.thrift.meta_data.FieldMetaData("mutation_map", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true), +- new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Mutation.class)))))); +- tmpMap.put(_Fields.CONSISTENCY_LEVEL, new org.apache.thrift.meta_data.FieldMetaData("consistency_level", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ConsistencyLevel.class))); +- 
metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(atomic_batch_mutate_args.class, metaDataMap); +- } +- +- public atomic_batch_mutate_args() { +- this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- public atomic_batch_mutate_args( +- Map>> mutation_map, +- ConsistencyLevel consistency_level) +- { +- this(); +- this.mutation_map = mutation_map; +- this.consistency_level = consistency_level; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public atomic_batch_mutate_args(atomic_batch_mutate_args other) { +- if (other.isSetMutation_map()) { +- Map>> __this__mutation_map = new HashMap>>(other.mutation_map.size()); +- for (Map.Entry>> other_element : other.mutation_map.entrySet()) { +- +- ByteBuffer other_element_key = other_element.getKey(); +- Map> other_element_value = other_element.getValue(); +- +- ByteBuffer __this__mutation_map_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key); +-; +- +- Map> __this__mutation_map_copy_value = new HashMap>(other_element_value.size()); +- for (Map.Entry> other_element_value_element : other_element_value.entrySet()) { +- +- String other_element_value_element_key = other_element_value_element.getKey(); +- List other_element_value_element_value = other_element_value_element.getValue(); +- +- String __this__mutation_map_copy_value_copy_key = other_element_value_element_key; +- +- List __this__mutation_map_copy_value_copy_value = new ArrayList(other_element_value_element_value.size()); +- for (Mutation other_element_value_element_value_element : other_element_value_element_value) { +- __this__mutation_map_copy_value_copy_value.add(new Mutation(other_element_value_element_value_element)); +- } +- +- __this__mutation_map_copy_value.put(__this__mutation_map_copy_value_copy_key, __this__mutation_map_copy_value_copy_value); +- } +- +- __this__mutation_map.put(__this__mutation_map_copy_key, 
__this__mutation_map_copy_value); +- } +- this.mutation_map = __this__mutation_map; +- } +- if (other.isSetConsistency_level()) { +- this.consistency_level = other.consistency_level; +- } +- } +- +- public atomic_batch_mutate_args deepCopy() { +- return new atomic_batch_mutate_args(this); +- } +- +- @Override +- public void clear() { +- this.mutation_map = null; +- this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- public int getMutation_mapSize() { +- return (this.mutation_map == null) ? 0 : this.mutation_map.size(); +- } +- +- public void putToMutation_map(ByteBuffer key, Map> val) { +- if (this.mutation_map == null) { +- this.mutation_map = new HashMap>>(); +- } +- this.mutation_map.put(key, val); +- } +- +- public Map>> getMutation_map() { +- return this.mutation_map; +- } +- +- public atomic_batch_mutate_args setMutation_map(Map>> mutation_map) { +- this.mutation_map = mutation_map; +- return this; +- } +- +- public void unsetMutation_map() { +- this.mutation_map = null; +- } +- +- /** Returns true if field mutation_map is set (has been assigned a value) and false otherwise */ +- public boolean isSetMutation_map() { +- return this.mutation_map != null; +- } +- +- public void setMutation_mapIsSet(boolean value) { +- if (!value) { +- this.mutation_map = null; +- } +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel getConsistency_level() { +- return this.consistency_level; +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public atomic_batch_mutate_args setConsistency_level(ConsistencyLevel consistency_level) { +- this.consistency_level = consistency_level; +- return this; +- } +- +- public void unsetConsistency_level() { +- this.consistency_level = null; +- } +- +- /** Returns true if field consistency_level is set (has been assigned a value) and false otherwise */ +- public boolean isSetConsistency_level() { +- return this.consistency_level != null; +- } +- +- public void 
setConsistency_levelIsSet(boolean value) { +- if (!value) { +- this.consistency_level = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case MUTATION_MAP: +- if (value == null) { +- unsetMutation_map(); +- } else { +- setMutation_map((Map>>)value); +- } +- break; +- +- case CONSISTENCY_LEVEL: +- if (value == null) { +- unsetConsistency_level(); +- } else { +- setConsistency_level((ConsistencyLevel)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case MUTATION_MAP: +- return getMutation_map(); +- +- case CONSISTENCY_LEVEL: +- return getConsistency_level(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case MUTATION_MAP: +- return isSetMutation_map(); +- case CONSISTENCY_LEVEL: +- return isSetConsistency_level(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof atomic_batch_mutate_args) +- return this.equals((atomic_batch_mutate_args)that); +- return false; +- } +- +- public boolean equals(atomic_batch_mutate_args that) { +- if (that == null) +- return false; +- +- boolean this_present_mutation_map = true && this.isSetMutation_map(); +- boolean that_present_mutation_map = true && that.isSetMutation_map(); +- if (this_present_mutation_map || that_present_mutation_map) { +- if (!(this_present_mutation_map && that_present_mutation_map)) +- return false; +- if (!this.mutation_map.equals(that.mutation_map)) +- return false; +- } +- +- boolean this_present_consistency_level = true && this.isSetConsistency_level(); +- boolean that_present_consistency_level = true && 
that.isSetConsistency_level(); +- if (this_present_consistency_level || that_present_consistency_level) { +- if (!(this_present_consistency_level && that_present_consistency_level)) +- return false; +- if (!this.consistency_level.equals(that.consistency_level)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_mutation_map = true && (isSetMutation_map()); +- builder.append(present_mutation_map); +- if (present_mutation_map) +- builder.append(mutation_map); +- +- boolean present_consistency_level = true && (isSetConsistency_level()); +- builder.append(present_consistency_level); +- if (present_consistency_level) +- builder.append(consistency_level.getValue()); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(atomic_batch_mutate_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetMutation_map()).compareTo(other.isSetMutation_map()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetMutation_map()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.mutation_map, other.mutation_map); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetConsistency_level()).compareTo(other.isSetConsistency_level()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetConsistency_level()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.consistency_level, other.consistency_level); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- 
schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("atomic_batch_mutate_args("); +- boolean first = true; +- +- sb.append("mutation_map:"); +- if (this.mutation_map == null) { +- sb.append("null"); +- } else { +- sb.append(this.mutation_map); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("consistency_level:"); +- if (this.consistency_level == null) { +- sb.append("null"); +- } else { +- sb.append(this.consistency_level); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (mutation_map == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'mutation_map' was not present! Struct: " + toString()); +- } +- if (consistency_level == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'consistency_level' was not present! 
Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class atomic_batch_mutate_argsStandardSchemeFactory implements SchemeFactory { +- public atomic_batch_mutate_argsStandardScheme getScheme() { +- return new atomic_batch_mutate_argsStandardScheme(); +- } +- } +- +- private static class atomic_batch_mutate_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, atomic_batch_mutate_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // MUTATION_MAP +- if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { +- { +- org.apache.thrift.protocol.TMap _map344 = iprot.readMapBegin(); +- struct.mutation_map = new HashMap>>(2*_map344.size); +- for (int _i345 = 0; _i345 < _map344.size; ++_i345) +- { +- ByteBuffer _key346; +- Map> _val347; +- _key346 = iprot.readBinary(); +- { +- org.apache.thrift.protocol.TMap _map348 = iprot.readMapBegin(); +- _val347 = new HashMap>(2*_map348.size); +- for (int _i349 = 0; _i349 < _map348.size; ++_i349) +- { +- String _key350; +- List _val351; +- _key350 
= iprot.readString(); +- { +- org.apache.thrift.protocol.TList _list352 = iprot.readListBegin(); +- _val351 = new ArrayList(_list352.size); +- for (int _i353 = 0; _i353 < _list352.size; ++_i353) +- { +- Mutation _elem354; +- _elem354 = new Mutation(); +- _elem354.read(iprot); +- _val351.add(_elem354); +- } +- iprot.readListEnd(); +- } +- _val347.put(_key350, _val351); +- } +- iprot.readMapEnd(); +- } +- struct.mutation_map.put(_key346, _val347); +- } +- iprot.readMapEnd(); +- } +- struct.setMutation_mapIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // CONSISTENCY_LEVEL +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, atomic_batch_mutate_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.mutation_map != null) { +- oprot.writeFieldBegin(MUTATION_MAP_FIELD_DESC); +- { +- oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, struct.mutation_map.size())); +- for (Map.Entry>> _iter355 : struct.mutation_map.entrySet()) +- { +- oprot.writeBinary(_iter355.getKey()); +- { +- oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, _iter355.getValue().size())); +- for (Map.Entry> _iter356 : 
_iter355.getValue().entrySet()) +- { +- oprot.writeString(_iter356.getKey()); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter356.getValue().size())); +- for (Mutation _iter357 : _iter356.getValue()) +- { +- _iter357.write(oprot); +- } +- oprot.writeListEnd(); +- } +- } +- oprot.writeMapEnd(); +- } +- } +- oprot.writeMapEnd(); +- } +- oprot.writeFieldEnd(); +- } +- if (struct.consistency_level != null) { +- oprot.writeFieldBegin(CONSISTENCY_LEVEL_FIELD_DESC); +- oprot.writeI32(struct.consistency_level.getValue()); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class atomic_batch_mutate_argsTupleSchemeFactory implements SchemeFactory { +- public atomic_batch_mutate_argsTupleScheme getScheme() { +- return new atomic_batch_mutate_argsTupleScheme(); +- } +- } +- +- private static class atomic_batch_mutate_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, atomic_batch_mutate_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- { +- oprot.writeI32(struct.mutation_map.size()); +- for (Map.Entry>> _iter358 : struct.mutation_map.entrySet()) +- { +- oprot.writeBinary(_iter358.getKey()); +- { +- oprot.writeI32(_iter358.getValue().size()); +- for (Map.Entry> _iter359 : _iter358.getValue().entrySet()) +- { +- oprot.writeString(_iter359.getKey()); +- { +- oprot.writeI32(_iter359.getValue().size()); +- for (Mutation _iter360 : _iter359.getValue()) +- { +- _iter360.write(oprot); +- } +- } +- } +- } +- } +- } +- oprot.writeI32(struct.consistency_level.getValue()); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, atomic_batch_mutate_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- { +- org.apache.thrift.protocol.TMap _map361 = new 
org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32()); +- struct.mutation_map = new HashMap>>(2*_map361.size); +- for (int _i362 = 0; _i362 < _map361.size; ++_i362) +- { +- ByteBuffer _key363; +- Map> _val364; +- _key363 = iprot.readBinary(); +- { +- org.apache.thrift.protocol.TMap _map365 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32()); +- _val364 = new HashMap>(2*_map365.size); +- for (int _i366 = 0; _i366 < _map365.size; ++_i366) +- { +- String _key367; +- List _val368; +- _key367 = iprot.readString(); +- { +- org.apache.thrift.protocol.TList _list369 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); +- _val368 = new ArrayList(_list369.size); +- for (int _i370 = 0; _i370 < _list369.size; ++_i370) +- { +- Mutation _elem371; +- _elem371 = new Mutation(); +- _elem371.read(iprot); +- _val368.add(_elem371); +- } +- } +- _val364.put(_key367, _val368); +- } +- } +- struct.mutation_map.put(_key363, _val364); +- } +- } +- struct.setMutation_mapIsSet(true); +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } +- } +- +- } +- +- public static class atomic_batch_mutate_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("atomic_batch_mutate_result"); +- +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField UE_FIELD_DESC = new org.apache.thrift.protocol.TField("ue", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField 
TE_FIELD_DESC = new org.apache.thrift.protocol.TField("te", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new atomic_batch_mutate_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new atomic_batch_mutate_resultTupleSchemeFactory()); +- } +- +- public InvalidRequestException ire; // required +- public UnavailableException ue; // required +- public TimedOutException te; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- IRE((short)1, "ire"), +- UE((short)2, "ue"), +- TE((short)3, "te"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // IRE +- return IRE; +- case 2: // UE +- return UE; +- case 3: // TE +- return TE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.UE, new org.apache.thrift.meta_data.FieldMetaData("ue", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.TE, new org.apache.thrift.meta_data.FieldMetaData("te", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(atomic_batch_mutate_result.class, metaDataMap); +- } +- +- public atomic_batch_mutate_result() { +- } +- +- public atomic_batch_mutate_result( +- InvalidRequestException ire, +- UnavailableException ue, +- TimedOutException te) +- { +- this(); +- this.ire = ire; +- this.ue = ue; +- this.te = te; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public atomic_batch_mutate_result(atomic_batch_mutate_result other) { +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- if (other.isSetUe()) { +- this.ue = new UnavailableException(other.ue); +- } +- if (other.isSetTe()) { +- this.te = new TimedOutException(other.te); +- } +- } +- +- public atomic_batch_mutate_result deepCopy() { +- return new atomic_batch_mutate_result(this); +- } +- +- @Override +- public void clear() { +- this.ire = null; +- this.ue = null; +- this.te = null; +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public atomic_batch_mutate_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public UnavailableException getUe() { +- return this.ue; +- } +- +- public atomic_batch_mutate_result setUe(UnavailableException ue) { +- this.ue = ue; +- return this; +- } +- +- public void unsetUe() { +- this.ue = null; +- } +- +- /** Returns true if field ue is set (has been assigned a value) and false otherwise */ +- public boolean isSetUe() { +- return this.ue != null; +- } +- +- public void setUeIsSet(boolean value) { +- if (!value) { +- this.ue = null; +- } +- } +- +- public TimedOutException getTe() { +- return this.te; +- } +- +- public atomic_batch_mutate_result setTe(TimedOutException te) { +- this.te = te; +- return this; +- } +- +- public void unsetTe() { +- this.te = null; +- } +- +- /** Returns true if field te is set (has been assigned a value) and false otherwise */ +- public boolean isSetTe() { +- return this.te != null; +- } +- +- public void setTeIsSet(boolean value) { +- if (!value) { +- this.te = null; +- } +- } +- +- 
public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- case UE: +- if (value == null) { +- unsetUe(); +- } else { +- setUe((UnavailableException)value); +- } +- break; +- +- case TE: +- if (value == null) { +- unsetTe(); +- } else { +- setTe((TimedOutException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case IRE: +- return getIre(); +- +- case UE: +- return getUe(); +- +- case TE: +- return getTe(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case IRE: +- return isSetIre(); +- case UE: +- return isSetUe(); +- case TE: +- return isSetTe(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof atomic_batch_mutate_result) +- return this.equals((atomic_batch_mutate_result)that); +- return false; +- } +- +- public boolean equals(atomic_batch_mutate_result that) { +- if (that == null) +- return false; +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- boolean this_present_ue = true && this.isSetUe(); +- boolean that_present_ue = true && that.isSetUe(); +- if (this_present_ue || that_present_ue) { +- if (!(this_present_ue && that_present_ue)) +- return false; +- if (!this.ue.equals(that.ue)) +- return false; +- } +- +- boolean this_present_te = true && 
this.isSetTe(); +- boolean that_present_te = true && that.isSetTe(); +- if (this_present_te || that_present_te) { +- if (!(this_present_te && that_present_te)) +- return false; +- if (!this.te.equals(that.te)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- boolean present_ue = true && (isSetUe()); +- builder.append(present_ue); +- if (present_ue) +- builder.append(ue); +- +- boolean present_te = true && (isSetTe()); +- builder.append(present_te); +- if (present_te) +- builder.append(te); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(atomic_batch_mutate_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetUe()).compareTo(other.isSetUe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetUe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ue, other.ue); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetTe()).compareTo(other.isSetTe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetTe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.te, other.te); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return 
_Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("atomic_batch_mutate_result("); +- boolean first = true; +- +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ue:"); +- if (this.ue == null) { +- sb.append("null"); +- } else { +- sb.append(this.ue); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("te:"); +- if (this.te == null) { +- sb.append("null"); +- } else { +- sb.append(this.te); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class atomic_batch_mutate_resultStandardSchemeFactory implements SchemeFactory { +- public atomic_batch_mutate_resultStandardScheme getScheme() { +- return new 
atomic_batch_mutate_resultStandardScheme(); +- } +- } +- +- private static class atomic_batch_mutate_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, atomic_batch_mutate_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // UE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // TE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, atomic_batch_mutate_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- 
oprot.writeFieldEnd(); +- } +- if (struct.ue != null) { +- oprot.writeFieldBegin(UE_FIELD_DESC); +- struct.ue.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.te != null) { +- oprot.writeFieldBegin(TE_FIELD_DESC); +- struct.te.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class atomic_batch_mutate_resultTupleSchemeFactory implements SchemeFactory { +- public atomic_batch_mutate_resultTupleScheme getScheme() { +- return new atomic_batch_mutate_resultTupleScheme(); +- } +- } +- +- private static class atomic_batch_mutate_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, atomic_batch_mutate_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetIre()) { +- optionals.set(0); +- } +- if (struct.isSetUe()) { +- optionals.set(1); +- } +- if (struct.isSetTe()) { +- optionals.set(2); +- } +- oprot.writeBitSet(optionals, 3); +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- if (struct.isSetUe()) { +- struct.ue.write(oprot); +- } +- if (struct.isSetTe()) { +- struct.te.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, atomic_batch_mutate_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(3); +- if (incoming.get(0)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } +- if (incoming.get(2)) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } +- } +- } +- +- } +- +- public static class truncate_args implements 
org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("truncate_args"); +- +- private static final org.apache.thrift.protocol.TField CFNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("cfname", org.apache.thrift.protocol.TType.STRING, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new truncate_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new truncate_argsTupleSchemeFactory()); +- } +- +- public String cfname; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- CFNAME((short)1, "cfname"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // CFNAME +- return CFNAME; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.CFNAME, new org.apache.thrift.meta_data.FieldMetaData("cfname", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(truncate_args.class, metaDataMap); +- } +- +- public truncate_args() { +- } +- +- public truncate_args( +- String cfname) +- { +- this(); +- this.cfname = cfname; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public truncate_args(truncate_args other) { +- if (other.isSetCfname()) { +- this.cfname = other.cfname; +- } +- } +- +- public truncate_args deepCopy() { +- return new truncate_args(this); +- } +- +- @Override +- public void clear() { +- this.cfname = null; +- } +- +- public String getCfname() { +- return this.cfname; +- } +- +- public truncate_args setCfname(String cfname) { +- this.cfname = cfname; +- return this; +- } +- +- public void unsetCfname() { +- this.cfname = null; +- } +- +- /** Returns true if field cfname is set (has been assigned a value) and false otherwise */ +- public boolean isSetCfname() { +- return this.cfname != null; +- } +- +- public void setCfnameIsSet(boolean value) { +- if (!value) { +- this.cfname = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case CFNAME: +- if (value == null) { +- unsetCfname(); +- } else { +- setCfname((String)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case CFNAME: +- return getCfname(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case CFNAME: +- return isSetCfname(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof truncate_args) +- return this.equals((truncate_args)that); +- return false; +- } +- +- public boolean equals(truncate_args that) { +- if (that == null) +- return false; +- +- boolean this_present_cfname = true && this.isSetCfname(); +- boolean that_present_cfname = true && that.isSetCfname(); +- if (this_present_cfname || that_present_cfname) { +- if (!(this_present_cfname && that_present_cfname)) +- 
return false; +- if (!this.cfname.equals(that.cfname)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_cfname = true && (isSetCfname()); +- builder.append(present_cfname); +- if (present_cfname) +- builder.append(cfname); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(truncate_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetCfname()).compareTo(other.isSetCfname()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetCfname()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cfname, other.cfname); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("truncate_args("); +- boolean first = true; +- +- sb.append("cfname:"); +- if (this.cfname == null) { +- sb.append("null"); +- } else { +- sb.append(this.cfname); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (cfname == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'cfname' was not present! 
Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class truncate_argsStandardSchemeFactory implements SchemeFactory { +- public truncate_argsStandardScheme getScheme() { +- return new truncate_argsStandardScheme(); +- } +- } +- +- private static class truncate_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, truncate_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // CFNAME +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.cfname = iprot.readString(); +- struct.setCfnameIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, truncate_args struct) 
throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.cfname != null) { +- oprot.writeFieldBegin(CFNAME_FIELD_DESC); +- oprot.writeString(struct.cfname); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class truncate_argsTupleSchemeFactory implements SchemeFactory { +- public truncate_argsTupleScheme getScheme() { +- return new truncate_argsTupleScheme(); +- } +- } +- +- private static class truncate_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, truncate_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeString(struct.cfname); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, truncate_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.cfname = iprot.readString(); +- struct.setCfnameIsSet(true); +- } +- } +- +- } +- +- public static class truncate_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("truncate_result"); +- +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField UE_FIELD_DESC = new org.apache.thrift.protocol.TField("ue", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField TE_FIELD_DESC = new org.apache.thrift.protocol.TField("te", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new 
truncate_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new truncate_resultTupleSchemeFactory()); +- } +- +- public InvalidRequestException ire; // required +- public UnavailableException ue; // required +- public TimedOutException te; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- IRE((short)1, "ire"), +- UE((short)2, "ue"), +- TE((short)3, "te"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // IRE +- return IRE; +- case 2: // UE +- return UE; +- case 3: // TE +- return TE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.UE, new org.apache.thrift.meta_data.FieldMetaData("ue", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.TE, new org.apache.thrift.meta_data.FieldMetaData("te", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(truncate_result.class, metaDataMap); +- } +- +- public truncate_result() { +- } +- +- public truncate_result( +- InvalidRequestException ire, +- UnavailableException ue, +- TimedOutException te) +- { +- this(); +- this.ire = ire; +- this.ue = ue; +- this.te = te; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public truncate_result(truncate_result other) { +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- if (other.isSetUe()) { +- this.ue = new UnavailableException(other.ue); +- } +- if (other.isSetTe()) { +- this.te = new TimedOutException(other.te); +- } +- } +- +- public truncate_result deepCopy() { +- return new truncate_result(this); +- } +- +- @Override +- public void clear() { +- this.ire = null; +- this.ue = null; +- this.te = null; +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public truncate_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public UnavailableException getUe() { +- return this.ue; +- } +- +- public truncate_result setUe(UnavailableException ue) { +- this.ue = ue; +- return this; +- } +- +- public void unsetUe() { +- this.ue = null; +- } +- +- /** Returns true if field ue is set (has been assigned a value) and false otherwise */ +- public boolean isSetUe() { +- return this.ue != null; +- } +- +- public void setUeIsSet(boolean value) { +- if (!value) { +- this.ue = null; +- } +- } +- +- public TimedOutException getTe() { +- return this.te; +- } +- +- public truncate_result setTe(TimedOutException te) { +- this.te = te; +- return this; +- } +- +- public void unsetTe() { +- this.te = null; +- } +- +- /** Returns true if field te is set (has been assigned a value) and false otherwise */ +- public boolean isSetTe() { +- return this.te != null; +- } +- +- public void setTeIsSet(boolean value) { +- if (!value) { +- this.te = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- 
case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- case UE: +- if (value == null) { +- unsetUe(); +- } else { +- setUe((UnavailableException)value); +- } +- break; +- +- case TE: +- if (value == null) { +- unsetTe(); +- } else { +- setTe((TimedOutException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case IRE: +- return getIre(); +- +- case UE: +- return getUe(); +- +- case TE: +- return getTe(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case IRE: +- return isSetIre(); +- case UE: +- return isSetUe(); +- case TE: +- return isSetTe(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof truncate_result) +- return this.equals((truncate_result)that); +- return false; +- } +- +- public boolean equals(truncate_result that) { +- if (that == null) +- return false; +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- boolean this_present_ue = true && this.isSetUe(); +- boolean that_present_ue = true && that.isSetUe(); +- if (this_present_ue || that_present_ue) { +- if (!(this_present_ue && that_present_ue)) +- return false; +- if (!this.ue.equals(that.ue)) +- return false; +- } +- +- boolean this_present_te = true && this.isSetTe(); +- boolean that_present_te = true && that.isSetTe(); +- if (this_present_te || that_present_te) { +- if 
(!(this_present_te && that_present_te)) +- return false; +- if (!this.te.equals(that.te)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- boolean present_ue = true && (isSetUe()); +- builder.append(present_ue); +- if (present_ue) +- builder.append(ue); +- +- boolean present_te = true && (isSetTe()); +- builder.append(present_te); +- if (present_te) +- builder.append(te); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(truncate_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetUe()).compareTo(other.isSetUe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetUe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ue, other.ue); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetTe()).compareTo(other.isSetTe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetTe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.te, other.te); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- 
schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("truncate_result("); +- boolean first = true; +- +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ue:"); +- if (this.ue == null) { +- sb.append("null"); +- } else { +- sb.append(this.ue); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("te:"); +- if (this.te == null) { +- sb.append("null"); +- } else { +- sb.append(this.te); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class truncate_resultStandardSchemeFactory implements SchemeFactory { +- public truncate_resultStandardScheme getScheme() { +- return new truncate_resultStandardScheme(); +- } +- } +- +- private static class truncate_resultStandardScheme extends StandardScheme { +- +- public void 
read(org.apache.thrift.protocol.TProtocol iprot, truncate_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // UE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // TE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, truncate_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ue != null) { +- oprot.writeFieldBegin(UE_FIELD_DESC); +- struct.ue.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.te != null) { +- 
oprot.writeFieldBegin(TE_FIELD_DESC); +- struct.te.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class truncate_resultTupleSchemeFactory implements SchemeFactory { +- public truncate_resultTupleScheme getScheme() { +- return new truncate_resultTupleScheme(); +- } +- } +- +- private static class truncate_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, truncate_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetIre()) { +- optionals.set(0); +- } +- if (struct.isSetUe()) { +- optionals.set(1); +- } +- if (struct.isSetTe()) { +- optionals.set(2); +- } +- oprot.writeBitSet(optionals, 3); +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- if (struct.isSetUe()) { +- struct.ue.write(oprot); +- } +- if (struct.isSetTe()) { +- struct.te.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, truncate_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(3); +- if (incoming.get(0)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } +- if (incoming.get(2)) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } +- } +- } +- +- } +- +- public static class get_multi_slice_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_multi_slice_args"); +- +- private static final 
org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new get_multi_slice_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new get_multi_slice_argsTupleSchemeFactory()); +- } +- +- public MultiSliceRequest request; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- REQUEST((short)1, "request"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // REQUEST +- return REQUEST; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, MultiSliceRequest.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_multi_slice_args.class, metaDataMap); +- } +- +- public get_multi_slice_args() { +- } +- +- public get_multi_slice_args( +- MultiSliceRequest request) +- { +- this(); +- this.request = request; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public get_multi_slice_args(get_multi_slice_args other) { +- if (other.isSetRequest()) { +- this.request = new MultiSliceRequest(other.request); +- } +- } +- +- public get_multi_slice_args deepCopy() { +- return new get_multi_slice_args(this); +- } +- +- @Override +- public void clear() { +- this.request = null; +- } +- +- public MultiSliceRequest getRequest() { +- return this.request; +- } +- +- public get_multi_slice_args setRequest(MultiSliceRequest request) { +- this.request = request; +- return this; +- } +- +- public void unsetRequest() { +- this.request = null; +- } +- +- /** Returns true if field request is set (has been assigned a value) and false otherwise */ +- public boolean isSetRequest() { +- return this.request != null; +- } +- +- public void setRequestIsSet(boolean value) { +- if (!value) { +- this.request = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case REQUEST: +- if (value == null) { +- unsetRequest(); +- } else { +- setRequest((MultiSliceRequest)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case REQUEST: +- return getRequest(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case REQUEST: +- return isSetRequest(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof get_multi_slice_args) +- return this.equals((get_multi_slice_args)that); +- return false; +- } +- +- public boolean equals(get_multi_slice_args that) { +- if (that == null) +- return false; +- +- boolean this_present_request = true && this.isSetRequest(); +- boolean that_present_request = true 
&& that.isSetRequest(); +- if (this_present_request || that_present_request) { +- if (!(this_present_request && that_present_request)) +- return false; +- if (!this.request.equals(that.request)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_request = true && (isSetRequest()); +- builder.append(present_request); +- if (present_request) +- builder.append(request); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(get_multi_slice_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetRequest()).compareTo(other.isSetRequest()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetRequest()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.request, other.request); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("get_multi_slice_args("); +- boolean first = true; +- +- sb.append("request:"); +- if (this.request == null) { +- sb.append("null"); +- } else { +- sb.append(this.request); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (request == null) { +- 
throw new org.apache.thrift.protocol.TProtocolException("Required field 'request' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- if (request != null) { +- request.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class get_multi_slice_argsStandardSchemeFactory implements SchemeFactory { +- public get_multi_slice_argsStandardScheme getScheme() { +- return new get_multi_slice_argsStandardScheme(); +- } +- } +- +- private static class get_multi_slice_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, get_multi_slice_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // REQUEST +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.request = new MultiSliceRequest(); +- struct.request.read(iprot); +- struct.setRequestIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- 
iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, get_multi_slice_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.request != null) { +- oprot.writeFieldBegin(REQUEST_FIELD_DESC); +- struct.request.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class get_multi_slice_argsTupleSchemeFactory implements SchemeFactory { +- public get_multi_slice_argsTupleScheme getScheme() { +- return new get_multi_slice_argsTupleScheme(); +- } +- } +- +- private static class get_multi_slice_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, get_multi_slice_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- struct.request.write(oprot); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, get_multi_slice_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.request = new MultiSliceRequest(); +- struct.request.read(iprot); +- struct.setRequestIsSet(true); +- } +- } +- +- } +- +- public static class get_multi_slice_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_multi_slice_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", 
org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField UE_FIELD_DESC = new org.apache.thrift.protocol.TField("ue", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField TE_FIELD_DESC = new org.apache.thrift.protocol.TField("te", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new get_multi_slice_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new get_multi_slice_resultTupleSchemeFactory()); +- } +- +- public List success; // required +- public InvalidRequestException ire; // required +- public UnavailableException ue; // required +- public TimedOutException te; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"), +- UE((short)2, "ue"), +- TE((short)3, "te"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- case 2: // UE +- return UE; +- case 3: // TE +- return TE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. 
+- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnOrSuperColumn.class)))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.UE, new org.apache.thrift.meta_data.FieldMetaData("ue", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.TE, new org.apache.thrift.meta_data.FieldMetaData("te", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_multi_slice_result.class, metaDataMap); +- } +- +- public get_multi_slice_result() { +- } +- +- public get_multi_slice_result( +- List success, +- InvalidRequestException ire, +- UnavailableException ue, +- TimedOutException te) +- { +- this(); +- this.success = success; +- this.ire = ire; +- this.ue = ue; +- this.te = te; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public get_multi_slice_result(get_multi_slice_result other) { +- if (other.isSetSuccess()) { +- List __this__success = new ArrayList(other.success.size()); +- for (ColumnOrSuperColumn other_element : other.success) { +- __this__success.add(new ColumnOrSuperColumn(other_element)); +- } +- this.success = __this__success; +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- if (other.isSetUe()) { +- this.ue = new UnavailableException(other.ue); +- } +- if (other.isSetTe()) { +- this.te = new TimedOutException(other.te); +- } +- } +- +- public get_multi_slice_result deepCopy() { +- return new get_multi_slice_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- this.ue = null; +- this.te = null; +- } +- +- public int getSuccessSize() { +- return (this.success == null) ? 0 : this.success.size(); +- } +- +- public java.util.Iterator getSuccessIterator() { +- return (this.success == null) ? 
null : this.success.iterator(); +- } +- +- public void addToSuccess(ColumnOrSuperColumn elem) { +- if (this.success == null) { +- this.success = new ArrayList(); +- } +- this.success.add(elem); +- } +- +- public List getSuccess() { +- return this.success; +- } +- +- public get_multi_slice_result setSuccess(List success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public get_multi_slice_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public UnavailableException getUe() { +- return this.ue; +- } +- +- public get_multi_slice_result setUe(UnavailableException ue) { +- this.ue = ue; +- return this; +- } +- +- public void unsetUe() { +- this.ue = null; +- } +- +- /** Returns true if field ue is set (has been assigned a value) and false otherwise */ +- public boolean isSetUe() { +- return this.ue != null; +- } +- +- public void setUeIsSet(boolean value) { +- if (!value) { +- this.ue = null; +- } +- } +- +- public TimedOutException getTe() { +- return this.te; +- } +- +- public get_multi_slice_result setTe(TimedOutException te) { +- this.te = te; +- return this; +- } +- +- public void unsetTe() { +- this.te = null; +- } +- +- /** Returns true if field te is set (has been assigned a value) and false otherwise */ +- 
public boolean isSetTe() { +- return this.te != null; +- } +- +- public void setTeIsSet(boolean value) { +- if (!value) { +- this.te = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((List)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- case UE: +- if (value == null) { +- unsetUe(); +- } else { +- setUe((UnavailableException)value); +- } +- break; +- +- case TE: +- if (value == null) { +- unsetTe(); +- } else { +- setTe((TimedOutException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- case UE: +- return getUe(); +- +- case TE: +- return getTe(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- case UE: +- return isSetUe(); +- case TE: +- return isSetTe(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof get_multi_slice_result) +- return this.equals((get_multi_slice_result)that); +- return false; +- } +- +- public boolean equals(get_multi_slice_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; 
+- if (!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- boolean this_present_ue = true && this.isSetUe(); +- boolean that_present_ue = true && that.isSetUe(); +- if (this_present_ue || that_present_ue) { +- if (!(this_present_ue && that_present_ue)) +- return false; +- if (!this.ue.equals(that.ue)) +- return false; +- } +- +- boolean this_present_te = true && this.isSetTe(); +- boolean that_present_te = true && that.isSetTe(); +- if (this_present_te || that_present_te) { +- if (!(this_present_te && that_present_te)) +- return false; +- if (!this.te.equals(that.te)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- boolean present_ue = true && (isSetUe()); +- builder.append(present_ue); +- if (present_ue) +- builder.append(ue); +- +- boolean present_te = true && (isSetTe()); +- builder.append(present_te); +- if (present_te) +- builder.append(te); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(get_multi_slice_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetUe()).compareTo(other.isSetUe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetUe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ue, other.ue); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetTe()).compareTo(other.isSetTe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetTe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.te, other.te); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("get_multi_slice_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- if (!first) sb.append(", "); +- 
sb.append("ue:"); +- if (this.ue == null) { +- sb.append("null"); +- } else { +- sb.append(this.ue); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("te:"); +- if (this.te == null) { +- sb.append("null"); +- } else { +- sb.append(this.te); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class get_multi_slice_resultStandardSchemeFactory implements SchemeFactory { +- public get_multi_slice_resultStandardScheme getScheme() { +- return new get_multi_slice_resultStandardScheme(); +- } +- } +- +- private static class get_multi_slice_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, get_multi_slice_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list372 = iprot.readListBegin(); +- struct.success = 
new ArrayList(_list372.size); +- for (int _i373 = 0; _i373 < _list372.size; ++_i373) +- { +- ColumnOrSuperColumn _elem374; +- _elem374 = new ColumnOrSuperColumn(); +- _elem374.read(iprot); +- struct.success.add(_elem374); +- } +- iprot.readListEnd(); +- } +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // UE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // TE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, get_multi_slice_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); +- for (ColumnOrSuperColumn _iter375 : struct.success) +- 
{ +- _iter375.write(oprot); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ue != null) { +- oprot.writeFieldBegin(UE_FIELD_DESC); +- struct.ue.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.te != null) { +- oprot.writeFieldBegin(TE_FIELD_DESC); +- struct.te.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class get_multi_slice_resultTupleSchemeFactory implements SchemeFactory { +- public get_multi_slice_resultTupleScheme getScheme() { +- return new get_multi_slice_resultTupleScheme(); +- } +- } +- +- private static class get_multi_slice_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, get_multi_slice_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetIre()) { +- optionals.set(1); +- } +- if (struct.isSetUe()) { +- optionals.set(2); +- } +- if (struct.isSetTe()) { +- optionals.set(3); +- } +- oprot.writeBitSet(optionals, 4); +- if (struct.isSetSuccess()) { +- { +- oprot.writeI32(struct.success.size()); +- for (ColumnOrSuperColumn _iter376 : struct.success) +- { +- _iter376.write(oprot); +- } +- } +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- if (struct.isSetUe()) { +- struct.ue.write(oprot); +- } +- if (struct.isSetTe()) { +- struct.te.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, get_multi_slice_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(4); +- if (incoming.get(0)) { +- { +- org.apache.thrift.protocol.TList 
_list377 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); +- struct.success = new ArrayList(_list377.size); +- for (int _i378 = 0; _i378 < _list377.size; ++_i378) +- { +- ColumnOrSuperColumn _elem379; +- _elem379 = new ColumnOrSuperColumn(); +- _elem379.read(iprot); +- struct.success.add(_elem379); +- } +- } +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- if (incoming.get(2)) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } +- if (incoming.get(3)) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } +- } +- } +- +- } +- +- public static class describe_schema_versions_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("describe_schema_versions_args"); +- +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new describe_schema_versions_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new describe_schema_versions_argsTupleSchemeFactory()); +- } +- +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +-; +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. 
+- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(describe_schema_versions_args.class, metaDataMap); +- } +- +- public describe_schema_versions_args() { +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public describe_schema_versions_args(describe_schema_versions_args other) { +- } +- +- public describe_schema_versions_args deepCopy() { +- return new describe_schema_versions_args(this); +- } +- +- @Override +- public void clear() { +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof describe_schema_versions_args) +- return this.equals((describe_schema_versions_args)that); +- return false; +- } +- +- public boolean equals(describe_schema_versions_args that) { +- if (that == null) +- return false; +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(describe_schema_versions_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- 
StringBuilder sb = new StringBuilder("describe_schema_versions_args("); +- boolean first = true; +- +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class describe_schema_versions_argsStandardSchemeFactory implements SchemeFactory { +- public describe_schema_versions_argsStandardScheme getScheme() { +- return new describe_schema_versions_argsStandardScheme(); +- } +- } +- +- private static class describe_schema_versions_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, describe_schema_versions_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void 
write(org.apache.thrift.protocol.TProtocol oprot, describe_schema_versions_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class describe_schema_versions_argsTupleSchemeFactory implements SchemeFactory { +- public describe_schema_versions_argsTupleScheme getScheme() { +- return new describe_schema_versions_argsTupleScheme(); +- } +- } +- +- private static class describe_schema_versions_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, describe_schema_versions_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, describe_schema_versions_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- } +- } +- +- } +- +- public static class describe_schema_versions_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("describe_schema_versions_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.MAP, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new describe_schema_versions_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new describe_schema_versions_resultTupleSchemeFactory()); +- } +- +- public Map> success; // required +- public 
InvalidRequestException ire; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(describe_schema_versions_result.class, metaDataMap); +- } +- +- public describe_schema_versions_result() { +- } +- +- public describe_schema_versions_result( +- Map> success, +- InvalidRequestException ire) +- { +- this(); +- this.success = success; +- this.ire = ire; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public describe_schema_versions_result(describe_schema_versions_result other) { +- if (other.isSetSuccess()) { +- Map> __this__success = new HashMap>(other.success.size()); +- for (Map.Entry> other_element : other.success.entrySet()) { +- +- String other_element_key = other_element.getKey(); +- List other_element_value = other_element.getValue(); +- +- String __this__success_copy_key = other_element_key; +- +- List __this__success_copy_value = new ArrayList(other_element_value); +- +- __this__success.put(__this__success_copy_key, __this__success_copy_value); +- } +- this.success = __this__success; +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- } +- +- public describe_schema_versions_result deepCopy() { +- return new describe_schema_versions_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- } +- +- public int getSuccessSize() { +- return (this.success == null) ? 0 : this.success.size(); +- } +- +- public void putToSuccess(String key, List val) { +- if (this.success == null) { +- this.success = new HashMap>(); +- } +- this.success.put(key, val); +- } +- +- public Map> getSuccess() { +- return this.success; +- } +- +- public describe_schema_versions_result setSuccess(Map> success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public describe_schema_versions_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has 
been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((Map>)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof describe_schema_versions_result) +- return this.equals((describe_schema_versions_result)that); +- return false; +- } +- +- public boolean equals(describe_schema_versions_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return 
false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(describe_schema_versions_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("describe_schema_versions_result("); +- boolean first = true; +- +- 
sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class describe_schema_versions_resultStandardSchemeFactory implements SchemeFactory { +- public describe_schema_versions_resultStandardScheme getScheme() { +- return new describe_schema_versions_resultStandardScheme(); +- } +- } +- +- private static class describe_schema_versions_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, describe_schema_versions_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { +- { +- 
org.apache.thrift.protocol.TMap _map380 = iprot.readMapBegin(); +- struct.success = new HashMap>(2*_map380.size); +- for (int _i381 = 0; _i381 < _map380.size; ++_i381) +- { +- String _key382; +- List _val383; +- _key382 = iprot.readString(); +- { +- org.apache.thrift.protocol.TList _list384 = iprot.readListBegin(); +- _val383 = new ArrayList(_list384.size); +- for (int _i385 = 0; _i385 < _list384.size; ++_i385) +- { +- String _elem386; +- _elem386 = iprot.readString(); +- _val383.add(_elem386); +- } +- iprot.readListEnd(); +- } +- struct.success.put(_key382, _val383); +- } +- iprot.readMapEnd(); +- } +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, describe_schema_versions_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- { +- oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, struct.success.size())); +- for (Map.Entry> _iter387 : struct.success.entrySet()) +- { +- oprot.writeString(_iter387.getKey()); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
_iter387.getValue().size())); +- for (String _iter388 : _iter387.getValue()) +- { +- oprot.writeString(_iter388); +- } +- oprot.writeListEnd(); +- } +- } +- oprot.writeMapEnd(); +- } +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class describe_schema_versions_resultTupleSchemeFactory implements SchemeFactory { +- public describe_schema_versions_resultTupleScheme getScheme() { +- return new describe_schema_versions_resultTupleScheme(); +- } +- } +- +- private static class describe_schema_versions_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, describe_schema_versions_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetIre()) { +- optionals.set(1); +- } +- oprot.writeBitSet(optionals, 2); +- if (struct.isSetSuccess()) { +- { +- oprot.writeI32(struct.success.size()); +- for (Map.Entry> _iter389 : struct.success.entrySet()) +- { +- oprot.writeString(_iter389.getKey()); +- { +- oprot.writeI32(_iter389.getValue().size()); +- for (String _iter390 : _iter389.getValue()) +- { +- oprot.writeString(_iter390); +- } +- } +- } +- } +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, describe_schema_versions_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(2); +- if (incoming.get(0)) { +- { +- org.apache.thrift.protocol.TMap _map391 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, 
iprot.readI32()); +- struct.success = new HashMap>(2*_map391.size); +- for (int _i392 = 0; _i392 < _map391.size; ++_i392) +- { +- String _key393; +- List _val394; +- _key393 = iprot.readString(); +- { +- org.apache.thrift.protocol.TList _list395 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); +- _val394 = new ArrayList(_list395.size); +- for (int _i396 = 0; _i396 < _list395.size; ++_i396) +- { +- String _elem397; +- _elem397 = iprot.readString(); +- _val394.add(_elem397); +- } +- } +- struct.success.put(_key393, _val394); +- } +- } +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- } +- } +- +- } +- +- public static class describe_keyspaces_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("describe_keyspaces_args"); +- +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new describe_keyspaces_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new describe_keyspaces_argsTupleSchemeFactory()); +- } +- +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +-; +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. 
+- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(describe_keyspaces_args.class, metaDataMap); +- } +- +- public describe_keyspaces_args() { +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public describe_keyspaces_args(describe_keyspaces_args other) { +- } +- +- public describe_keyspaces_args deepCopy() { +- return new describe_keyspaces_args(this); +- } +- +- @Override +- public void clear() { +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof describe_keyspaces_args) +- return this.equals((describe_keyspaces_args)that); +- return false; +- } +- +- public boolean equals(describe_keyspaces_args that) { +- if (that == null) +- return false; +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(describe_keyspaces_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new 
StringBuilder("describe_keyspaces_args("); +- boolean first = true; +- +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class describe_keyspaces_argsStandardSchemeFactory implements SchemeFactory { +- public describe_keyspaces_argsStandardScheme getScheme() { +- return new describe_keyspaces_argsStandardScheme(); +- } +- } +- +- private static class describe_keyspaces_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, describe_keyspaces_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, describe_keyspaces_args struct) throws 
org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class describe_keyspaces_argsTupleSchemeFactory implements SchemeFactory { +- public describe_keyspaces_argsTupleScheme getScheme() { +- return new describe_keyspaces_argsTupleScheme(); +- } +- } +- +- private static class describe_keyspaces_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, describe_keyspaces_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, describe_keyspaces_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- } +- } +- +- } +- +- public static class describe_keyspaces_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("describe_keyspaces_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new describe_keyspaces_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new describe_keyspaces_resultTupleSchemeFactory()); +- } +- +- public List success; // required +- public InvalidRequestException ire; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating 
them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, KsDef.class)))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(describe_keyspaces_result.class, metaDataMap); +- } +- +- public describe_keyspaces_result() { +- } +- +- public describe_keyspaces_result( +- List success, +- InvalidRequestException ire) +- { +- this(); +- this.success = success; +- this.ire = ire; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public describe_keyspaces_result(describe_keyspaces_result other) { +- if (other.isSetSuccess()) { +- List __this__success = new ArrayList(other.success.size()); +- for (KsDef other_element : other.success) { +- __this__success.add(new KsDef(other_element)); +- } +- this.success = __this__success; +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- } +- +- public describe_keyspaces_result deepCopy() { +- return new describe_keyspaces_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- } +- +- public int getSuccessSize() { +- return (this.success == null) ? 0 : this.success.size(); +- } +- +- public java.util.Iterator getSuccessIterator() { +- return (this.success == null) ? null : this.success.iterator(); +- } +- +- public void addToSuccess(KsDef elem) { +- if (this.success == null) { +- this.success = new ArrayList(); +- } +- this.success.add(elem); +- } +- +- public List getSuccess() { +- return this.success; +- } +- +- public describe_keyspaces_result setSuccess(List success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public describe_keyspaces_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public 
void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((List)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof describe_keyspaces_result) +- return this.equals((describe_keyspaces_result)that); +- return false; +- } +- +- public boolean equals(describe_keyspaces_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); 
+- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(describe_keyspaces_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("describe_keyspaces_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- 
sb.append(this.ire); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class describe_keyspaces_resultStandardSchemeFactory implements SchemeFactory { +- public describe_keyspaces_resultStandardScheme getScheme() { +- return new describe_keyspaces_resultStandardScheme(); +- } +- } +- +- private static class describe_keyspaces_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, describe_keyspaces_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list398 = iprot.readListBegin(); +- struct.success = new ArrayList(_list398.size); +- for (int _i399 = 0; _i399 < _list398.size; ++_i399) +- { +- KsDef _elem400; +- _elem400 = new KsDef(); +- _elem400.read(iprot); +- struct.success.add(_elem400); +- } +- iprot.readListEnd(); 
+- } +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, describe_keyspaces_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); +- for (KsDef _iter401 : struct.success) +- { +- _iter401.write(oprot); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class describe_keyspaces_resultTupleSchemeFactory implements SchemeFactory { +- public describe_keyspaces_resultTupleScheme getScheme() { +- return new describe_keyspaces_resultTupleScheme(); +- } +- } +- +- private static class describe_keyspaces_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, describe_keyspaces_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new 
BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetIre()) { +- optionals.set(1); +- } +- oprot.writeBitSet(optionals, 2); +- if (struct.isSetSuccess()) { +- { +- oprot.writeI32(struct.success.size()); +- for (KsDef _iter402 : struct.success) +- { +- _iter402.write(oprot); +- } +- } +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, describe_keyspaces_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(2); +- if (incoming.get(0)) { +- { +- org.apache.thrift.protocol.TList _list403 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); +- struct.success = new ArrayList(_list403.size); +- for (int _i404 = 0; _i404 < _list403.size; ++_i404) +- { +- KsDef _elem405; +- _elem405 = new KsDef(); +- _elem405.read(iprot); +- struct.success.add(_elem405); +- } +- } +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- } +- } +- +- } +- +- public static class describe_cluster_name_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("describe_cluster_name_args"); +- +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new describe_cluster_name_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new describe_cluster_name_argsTupleSchemeFactory()); +- } +- +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +-; +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(describe_cluster_name_args.class, metaDataMap); +- } +- +- public describe_cluster_name_args() { +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public describe_cluster_name_args(describe_cluster_name_args other) { +- } +- +- public describe_cluster_name_args deepCopy() { +- return new describe_cluster_name_args(this); +- } +- +- @Override +- public void clear() { +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof describe_cluster_name_args) +- return this.equals((describe_cluster_name_args)that); +- return false; +- } +- +- public boolean equals(describe_cluster_name_args that) { +- if (that == null) +- return false; +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(describe_cluster_name_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new 
StringBuilder("describe_cluster_name_args("); +- boolean first = true; +- +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class describe_cluster_name_argsStandardSchemeFactory implements SchemeFactory { +- public describe_cluster_name_argsStandardScheme getScheme() { +- return new describe_cluster_name_argsStandardScheme(); +- } +- } +- +- private static class describe_cluster_name_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, describe_cluster_name_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, describe_cluster_name_args 
struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class describe_cluster_name_argsTupleSchemeFactory implements SchemeFactory { +- public describe_cluster_name_argsTupleScheme getScheme() { +- return new describe_cluster_name_argsTupleScheme(); +- } +- } +- +- private static class describe_cluster_name_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, describe_cluster_name_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, describe_cluster_name_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- } +- } +- +- } +- +- public static class describe_cluster_name_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("describe_cluster_name_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new describe_cluster_name_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new describe_cluster_name_resultTupleSchemeFactory()); +- } +- +- public String success; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(describe_cluster_name_result.class, metaDataMap); +- } +- +- public describe_cluster_name_result() { +- } +- +- public describe_cluster_name_result( +- String success) +- { +- this(); +- this.success = success; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public describe_cluster_name_result(describe_cluster_name_result other) { +- if (other.isSetSuccess()) { +- this.success = other.success; +- } +- } +- +- public describe_cluster_name_result deepCopy() { +- return new describe_cluster_name_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- } +- +- public String getSuccess() { +- return this.success; +- } +- +- public describe_cluster_name_result setSuccess(String success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((String)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if 
(that instanceof describe_cluster_name_result) +- return this.equals((describe_cluster_name_result)that); +- return false; +- } +- +- public boolean equals(describe_cluster_name_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(describe_cluster_name_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new 
StringBuilder("describe_cluster_name_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class describe_cluster_name_resultStandardSchemeFactory implements SchemeFactory { +- public describe_cluster_name_resultStandardScheme getScheme() { +- return new describe_cluster_name_resultStandardScheme(); +- } +- } +- +- private static class describe_cluster_name_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, describe_cluster_name_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.success = iprot.readString(); +- struct.setSuccessIsSet(true); +- } else { +- 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, describe_cluster_name_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- oprot.writeString(struct.success); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class describe_cluster_name_resultTupleSchemeFactory implements SchemeFactory { +- public describe_cluster_name_resultTupleScheme getScheme() { +- return new describe_cluster_name_resultTupleScheme(); +- } +- } +- +- private static class describe_cluster_name_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, describe_cluster_name_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- oprot.writeBitSet(optionals, 1); +- if (struct.isSetSuccess()) { +- oprot.writeString(struct.success); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, describe_cluster_name_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(1); +- if (incoming.get(0)) { +- struct.success = iprot.readString(); +- struct.setSuccessIsSet(true); +- } +- } +- } +- +- } +- +- public static class describe_version_args implements org.apache.thrift.TBase, java.io.Serializable, 
Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("describe_version_args"); +- +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new describe_version_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new describe_version_argsTupleSchemeFactory()); +- } +- +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +-; +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(describe_version_args.class, metaDataMap); +- } +- +- public describe_version_args() { +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public describe_version_args(describe_version_args other) { +- } +- +- public describe_version_args deepCopy() { +- return new describe_version_args(this); +- } +- +- @Override +- public void clear() { +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof describe_version_args) +- return this.equals((describe_version_args)that); +- return false; +- } +- +- public boolean equals(describe_version_args that) { +- if (that == null) +- return false; +- +- return true; +- } +- +- @Override +- public 
int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(describe_version_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("describe_version_args("); +- boolean first = true; +- +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class describe_version_argsStandardSchemeFactory implements SchemeFactory { +- public describe_version_argsStandardScheme getScheme() { +- return new describe_version_argsStandardScheme(); +- } +- } +- +- 
private static class describe_version_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, describe_version_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, describe_version_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class describe_version_argsTupleSchemeFactory implements SchemeFactory { +- public describe_version_argsTupleScheme getScheme() { +- return new describe_version_argsTupleScheme(); +- } +- } +- +- private static class describe_version_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, describe_version_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, describe_version_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- } +- } +- +- } +- +- public static class describe_version_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("describe_version_result"); +- +- 
private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new describe_version_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new describe_version_resultTupleSchemeFactory()); +- } +- +- public String success; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(describe_version_result.class, metaDataMap); +- } +- +- public describe_version_result() { +- } +- +- public describe_version_result( +- String success) +- { +- this(); +- this.success = success; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public describe_version_result(describe_version_result other) { +- if (other.isSetSuccess()) { +- this.success = other.success; +- } +- } +- +- public describe_version_result deepCopy() { +- return new describe_version_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- } +- +- public String getSuccess() { +- return this.success; +- } +- +- public describe_version_result setSuccess(String success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((String)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof describe_version_result) +- return this.equals((describe_version_result)that); +- return false; +- } +- +- public boolean equals(describe_version_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if 
(this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(describe_version_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("describe_version_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private 
void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class describe_version_resultStandardSchemeFactory implements SchemeFactory { +- public describe_version_resultStandardScheme getScheme() { +- return new describe_version_resultStandardScheme(); +- } +- } +- +- private static class describe_version_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, describe_version_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.success = iprot.readString(); +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, describe_version_result struct) throws 
org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- oprot.writeString(struct.success); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class describe_version_resultTupleSchemeFactory implements SchemeFactory { +- public describe_version_resultTupleScheme getScheme() { +- return new describe_version_resultTupleScheme(); +- } +- } +- +- private static class describe_version_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, describe_version_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- oprot.writeBitSet(optionals, 1); +- if (struct.isSetSuccess()) { +- oprot.writeString(struct.success); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, describe_version_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(1); +- if (incoming.get(0)) { +- struct.success = iprot.readString(); +- struct.setSuccessIsSet(true); +- } +- } +- } +- +- } +- +- public static class describe_ring_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("describe_ring_args"); +- +- private static final org.apache.thrift.protocol.TField KEYSPACE_FIELD_DESC = new org.apache.thrift.protocol.TField("keyspace", org.apache.thrift.protocol.TType.STRING, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new 
describe_ring_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new describe_ring_argsTupleSchemeFactory()); +- } +- +- public String keyspace; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- KEYSPACE((short)1, "keyspace"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // KEYSPACE +- return KEYSPACE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.KEYSPACE, new org.apache.thrift.meta_data.FieldMetaData("keyspace", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(describe_ring_args.class, metaDataMap); +- } +- +- public describe_ring_args() { +- } +- +- public describe_ring_args( +- String keyspace) +- { +- this(); +- this.keyspace = keyspace; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public describe_ring_args(describe_ring_args other) { +- if (other.isSetKeyspace()) { +- this.keyspace = other.keyspace; +- } +- } +- +- public describe_ring_args deepCopy() { +- return new describe_ring_args(this); +- } +- +- @Override +- public void clear() { +- this.keyspace = null; +- } +- +- public String getKeyspace() { +- return this.keyspace; +- } +- +- public describe_ring_args setKeyspace(String keyspace) { +- this.keyspace = keyspace; +- return this; +- } +- +- public void unsetKeyspace() { +- this.keyspace = null; +- } +- +- /** Returns true if field keyspace is set (has been assigned a value) and false otherwise */ +- public boolean isSetKeyspace() { +- return this.keyspace != null; +- } +- +- public void setKeyspaceIsSet(boolean value) { +- if (!value) { +- this.keyspace = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case KEYSPACE: +- if (value == null) { +- unsetKeyspace(); +- } else { +- setKeyspace((String)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case KEYSPACE: +- return getKeyspace(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case KEYSPACE: +- return isSetKeyspace(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof describe_ring_args) +- return this.equals((describe_ring_args)that); +- return false; +- } +- +- public boolean equals(describe_ring_args that) { +- if (that == null) +- return false; +- +- boolean this_present_keyspace = true && this.isSetKeyspace(); +- boolean that_present_keyspace = true && that.isSetKeyspace(); +- if 
(this_present_keyspace || that_present_keyspace) { +- if (!(this_present_keyspace && that_present_keyspace)) +- return false; +- if (!this.keyspace.equals(that.keyspace)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_keyspace = true && (isSetKeyspace()); +- builder.append(present_keyspace); +- if (present_keyspace) +- builder.append(keyspace); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(describe_ring_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetKeyspace()).compareTo(other.isSetKeyspace()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKeyspace()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.keyspace, other.keyspace); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("describe_ring_args("); +- boolean first = true; +- +- sb.append("keyspace:"); +- if (this.keyspace == null) { +- sb.append("null"); +- } else { +- sb.append(this.keyspace); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (keyspace == null) { +- throw new 
org.apache.thrift.protocol.TProtocolException("Required field 'keyspace' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class describe_ring_argsStandardSchemeFactory implements SchemeFactory { +- public describe_ring_argsStandardScheme getScheme() { +- return new describe_ring_argsStandardScheme(); +- } +- } +- +- private static class describe_ring_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, describe_ring_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // KEYSPACE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.keyspace = iprot.readString(); +- struct.setKeyspaceIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate 
method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, describe_ring_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.keyspace != null) { +- oprot.writeFieldBegin(KEYSPACE_FIELD_DESC); +- oprot.writeString(struct.keyspace); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class describe_ring_argsTupleSchemeFactory implements SchemeFactory { +- public describe_ring_argsTupleScheme getScheme() { +- return new describe_ring_argsTupleScheme(); +- } +- } +- +- private static class describe_ring_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, describe_ring_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeString(struct.keyspace); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, describe_ring_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.keyspace = iprot.readString(); +- struct.setKeyspaceIsSet(true); +- } +- } +- +- } +- +- public static class describe_ring_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("describe_ring_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- 
schemes.put(StandardScheme.class, new describe_ring_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new describe_ring_resultTupleSchemeFactory()); +- } +- +- public List success; // required +- public InvalidRequestException ire; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TokenRange.class)))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(describe_ring_result.class, metaDataMap); +- } +- +- public describe_ring_result() { +- } +- +- public describe_ring_result( +- List success, +- InvalidRequestException ire) +- { +- this(); +- this.success = success; +- this.ire = ire; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public describe_ring_result(describe_ring_result other) { +- if (other.isSetSuccess()) { +- List __this__success = new ArrayList(other.success.size()); +- for (TokenRange other_element : other.success) { +- __this__success.add(new TokenRange(other_element)); +- } +- this.success = __this__success; +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- } +- +- public describe_ring_result deepCopy() { +- return new describe_ring_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- } +- +- public int getSuccessSize() { +- return (this.success == null) ? 0 : this.success.size(); +- } +- +- public java.util.Iterator getSuccessIterator() { +- return (this.success == null) ? null : this.success.iterator(); +- } +- +- public void addToSuccess(TokenRange elem) { +- if (this.success == null) { +- this.success = new ArrayList(); +- } +- this.success.add(elem); +- } +- +- public List getSuccess() { +- return this.success; +- } +- +- public describe_ring_result setSuccess(List success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public describe_ring_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public void 
setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((List)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof describe_ring_result) +- return this.equals((describe_ring_result)that); +- return false; +- } +- +- public boolean equals(describe_ring_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- 
builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(describe_ring_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("describe_ring_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- 
} +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class describe_ring_resultStandardSchemeFactory implements SchemeFactory { +- public describe_ring_resultStandardScheme getScheme() { +- return new describe_ring_resultStandardScheme(); +- } +- } +- +- private static class describe_ring_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, describe_ring_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list406 = iprot.readListBegin(); +- struct.success = new ArrayList(_list406.size); +- for (int _i407 = 0; _i407 < _list406.size; ++_i407) +- { +- TokenRange _elem408; +- _elem408 = new TokenRange(); +- _elem408.read(iprot); +- struct.success.add(_elem408); +- } +- iprot.readListEnd(); +- } +- struct.setSuccessIsSet(true); 
+- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, describe_ring_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); +- for (TokenRange _iter409 : struct.success) +- { +- _iter409.write(oprot); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class describe_ring_resultTupleSchemeFactory implements SchemeFactory { +- public describe_ring_resultTupleScheme getScheme() { +- return new describe_ring_resultTupleScheme(); +- } +- } +- +- private static class describe_ring_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, describe_ring_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } 
+- if (struct.isSetIre()) { +- optionals.set(1); +- } +- oprot.writeBitSet(optionals, 2); +- if (struct.isSetSuccess()) { +- { +- oprot.writeI32(struct.success.size()); +- for (TokenRange _iter410 : struct.success) +- { +- _iter410.write(oprot); +- } +- } +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, describe_ring_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(2); +- if (incoming.get(0)) { +- { +- org.apache.thrift.protocol.TList _list411 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); +- struct.success = new ArrayList(_list411.size); +- for (int _i412 = 0; _i412 < _list411.size; ++_i412) +- { +- TokenRange _elem413; +- _elem413 = new TokenRange(); +- _elem413.read(iprot); +- struct.success.add(_elem413); +- } +- } +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- } +- } +- +- } +- +- public static class describe_local_ring_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("describe_local_ring_args"); +- +- private static final org.apache.thrift.protocol.TField KEYSPACE_FIELD_DESC = new org.apache.thrift.protocol.TField("keyspace", org.apache.thrift.protocol.TType.STRING, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new describe_local_ring_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new describe_local_ring_argsTupleSchemeFactory()); +- } +- +- public String keyspace; // required +- +- /** The set of fields this struct contains, along 
with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- KEYSPACE((short)1, "keyspace"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // KEYSPACE +- return KEYSPACE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.KEYSPACE, new org.apache.thrift.meta_data.FieldMetaData("keyspace", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(describe_local_ring_args.class, metaDataMap); +- } +- +- public describe_local_ring_args() { +- } +- +- public describe_local_ring_args( +- String keyspace) +- { +- this(); +- this.keyspace = keyspace; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public describe_local_ring_args(describe_local_ring_args other) { +- if (other.isSetKeyspace()) { +- this.keyspace = other.keyspace; +- } +- } +- +- public describe_local_ring_args deepCopy() { +- return new describe_local_ring_args(this); +- } +- +- @Override +- public void clear() { +- this.keyspace = null; +- } +- +- public String getKeyspace() { +- return this.keyspace; +- } +- +- public describe_local_ring_args setKeyspace(String keyspace) { +- this.keyspace = keyspace; +- return this; +- } +- +- public void unsetKeyspace() { +- this.keyspace = null; +- } +- +- /** Returns true if field keyspace is set (has been assigned a value) and false otherwise */ +- public boolean isSetKeyspace() { +- return this.keyspace != null; +- } +- +- public void setKeyspaceIsSet(boolean value) { +- if (!value) { +- this.keyspace = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case KEYSPACE: +- if (value == null) { +- unsetKeyspace(); +- } else { +- setKeyspace((String)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case KEYSPACE: +- return getKeyspace(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case KEYSPACE: +- return isSetKeyspace(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof describe_local_ring_args) +- return this.equals((describe_local_ring_args)that); +- return false; +- } +- +- public boolean equals(describe_local_ring_args that) { +- if (that == null) +- return false; +- +- boolean this_present_keyspace = true && this.isSetKeyspace(); +- boolean that_present_keyspace = 
true && that.isSetKeyspace(); +- if (this_present_keyspace || that_present_keyspace) { +- if (!(this_present_keyspace && that_present_keyspace)) +- return false; +- if (!this.keyspace.equals(that.keyspace)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_keyspace = true && (isSetKeyspace()); +- builder.append(present_keyspace); +- if (present_keyspace) +- builder.append(keyspace); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(describe_local_ring_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetKeyspace()).compareTo(other.isSetKeyspace()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKeyspace()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.keyspace, other.keyspace); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("describe_local_ring_args("); +- boolean first = true; +- +- sb.append("keyspace:"); +- if (this.keyspace == null) { +- sb.append("null"); +- } else { +- sb.append(this.keyspace); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required 
fields +- if (keyspace == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'keyspace' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class describe_local_ring_argsStandardSchemeFactory implements SchemeFactory { +- public describe_local_ring_argsStandardScheme getScheme() { +- return new describe_local_ring_argsStandardScheme(); +- } +- } +- +- private static class describe_local_ring_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, describe_local_ring_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // KEYSPACE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.keyspace = iprot.readString(); +- struct.setKeyspaceIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for 
required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, describe_local_ring_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.keyspace != null) { +- oprot.writeFieldBegin(KEYSPACE_FIELD_DESC); +- oprot.writeString(struct.keyspace); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class describe_local_ring_argsTupleSchemeFactory implements SchemeFactory { +- public describe_local_ring_argsTupleScheme getScheme() { +- return new describe_local_ring_argsTupleScheme(); +- } +- } +- +- private static class describe_local_ring_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, describe_local_ring_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeString(struct.keyspace); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, describe_local_ring_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.keyspace = iprot.readString(); +- struct.setKeyspaceIsSet(true); +- } +- } +- +- } +- +- public static class describe_local_ring_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("describe_local_ring_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, 
(short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new describe_local_ring_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new describe_local_ring_resultTupleSchemeFactory()); +- } +- +- public List success; // required +- public InvalidRequestException ire; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TokenRange.class)))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(describe_local_ring_result.class, metaDataMap); +- } +- +- public describe_local_ring_result() { +- } +- +- public describe_local_ring_result( +- List success, +- InvalidRequestException ire) +- { +- this(); +- this.success = success; +- this.ire = ire; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public describe_local_ring_result(describe_local_ring_result other) { +- if (other.isSetSuccess()) { +- List __this__success = new ArrayList(other.success.size()); +- for (TokenRange other_element : other.success) { +- __this__success.add(new TokenRange(other_element)); +- } +- this.success = __this__success; +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- } +- +- public describe_local_ring_result deepCopy() { +- return new describe_local_ring_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- } +- +- public int getSuccessSize() { +- return (this.success == null) ? 0 : this.success.size(); +- } +- +- public java.util.Iterator getSuccessIterator() { +- return (this.success == null) ? null : this.success.iterator(); +- } +- +- public void addToSuccess(TokenRange elem) { +- if (this.success == null) { +- this.success = new ArrayList(); +- } +- this.success.add(elem); +- } +- +- public List getSuccess() { +- return this.success; +- } +- +- public describe_local_ring_result setSuccess(List success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public describe_local_ring_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } 
+- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((List)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof describe_local_ring_result) +- return this.equals((describe_local_ring_result)that); +- return false; +- } +- +- public boolean equals(describe_local_ring_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true 
&& (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(describe_local_ring_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("describe_local_ring_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- 
} else { +- sb.append(this.ire); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class describe_local_ring_resultStandardSchemeFactory implements SchemeFactory { +- public describe_local_ring_resultStandardScheme getScheme() { +- return new describe_local_ring_resultStandardScheme(); +- } +- } +- +- private static class describe_local_ring_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, describe_local_ring_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list414 = iprot.readListBegin(); +- struct.success = new ArrayList(_list414.size); +- for (int _i415 = 0; _i415 < _list414.size; ++_i415) +- { +- TokenRange _elem416; +- _elem416 = new TokenRange(); +- _elem416.read(iprot); +- struct.success.add(_elem416); +- 
} +- iprot.readListEnd(); +- } +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, describe_local_ring_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); +- for (TokenRange _iter417 : struct.success) +- { +- _iter417.write(oprot); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class describe_local_ring_resultTupleSchemeFactory implements SchemeFactory { +- public describe_local_ring_resultTupleScheme getScheme() { +- return new describe_local_ring_resultTupleScheme(); +- } +- } +- +- private static class describe_local_ring_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, describe_local_ring_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) 
prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetIre()) { +- optionals.set(1); +- } +- oprot.writeBitSet(optionals, 2); +- if (struct.isSetSuccess()) { +- { +- oprot.writeI32(struct.success.size()); +- for (TokenRange _iter418 : struct.success) +- { +- _iter418.write(oprot); +- } +- } +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, describe_local_ring_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(2); +- if (incoming.get(0)) { +- { +- org.apache.thrift.protocol.TList _list419 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); +- struct.success = new ArrayList(_list419.size); +- for (int _i420 = 0; _i420 < _list419.size; ++_i420) +- { +- TokenRange _elem421; +- _elem421 = new TokenRange(); +- _elem421.read(iprot); +- struct.success.add(_elem421); +- } +- } +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- } +- } +- +- } +- +- public static class describe_token_map_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("describe_token_map_args"); +- +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new describe_token_map_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new describe_token_map_argsTupleSchemeFactory()); +- } +- +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +-; +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(describe_token_map_args.class, metaDataMap); +- } +- +- public describe_token_map_args() { +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public describe_token_map_args(describe_token_map_args other) { +- } +- +- public describe_token_map_args deepCopy() { +- return new describe_token_map_args(this); +- } +- +- @Override +- public void clear() { +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof describe_token_map_args) +- return this.equals((describe_token_map_args)that); +- return false; +- } +- +- public boolean equals(describe_token_map_args that) { +- if (that == null) +- return false; +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(describe_token_map_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new 
StringBuilder("describe_token_map_args("); +- boolean first = true; +- +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class describe_token_map_argsStandardSchemeFactory implements SchemeFactory { +- public describe_token_map_argsStandardScheme getScheme() { +- return new describe_token_map_argsStandardScheme(); +- } +- } +- +- private static class describe_token_map_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, describe_token_map_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, describe_token_map_args struct) throws 
org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class describe_token_map_argsTupleSchemeFactory implements SchemeFactory { +- public describe_token_map_argsTupleScheme getScheme() { +- return new describe_token_map_argsTupleScheme(); +- } +- } +- +- private static class describe_token_map_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, describe_token_map_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, describe_token_map_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- } +- } +- +- } +- +- public static class describe_token_map_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("describe_token_map_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.MAP, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new describe_token_map_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new describe_token_map_resultTupleSchemeFactory()); +- } +- +- public Map success; // required +- public InvalidRequestException ire; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(describe_token_map_result.class, metaDataMap); +- } +- +- public describe_token_map_result() { +- } +- +- public describe_token_map_result( +- Map success, +- InvalidRequestException ire) +- { +- this(); +- this.success = success; +- this.ire = ire; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public describe_token_map_result(describe_token_map_result other) { +- if (other.isSetSuccess()) { +- Map __this__success = new HashMap(other.success); +- this.success = __this__success; +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- } +- +- public describe_token_map_result deepCopy() { +- return new describe_token_map_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- } +- +- public int getSuccessSize() { +- return (this.success == null) ? 0 : this.success.size(); +- } +- +- public void putToSuccess(String key, String val) { +- if (this.success == null) { +- this.success = new HashMap(); +- } +- this.success.put(key, val); +- } +- +- public Map getSuccess() { +- return this.success; +- } +- +- public describe_token_map_result setSuccess(Map success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public describe_token_map_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((Map)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- 
unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof describe_token_map_result) +- return this.equals((describe_token_map_result)that); +- return false; +- } +- +- public boolean equals(describe_token_map_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- return 
builder.toHashCode(); +- } +- +- @Override +- public int compareTo(describe_token_map_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("describe_token_map_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- 
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class describe_token_map_resultStandardSchemeFactory implements SchemeFactory { +- public describe_token_map_resultStandardScheme getScheme() { +- return new describe_token_map_resultStandardScheme(); +- } +- } +- +- private static class describe_token_map_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, describe_token_map_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { +- { +- org.apache.thrift.protocol.TMap _map422 = iprot.readMapBegin(); +- struct.success = new HashMap(2*_map422.size); +- for (int _i423 = 0; _i423 < _map422.size; ++_i423) +- { +- String _key424; +- String _val425; +- _key424 = iprot.readString(); +- _val425 = iprot.readString(); +- struct.success.put(_key424, _val425); +- } +- iprot.readMapEnd(); +- } +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == 
org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, describe_token_map_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- { +- oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.success.size())); +- for (Map.Entry _iter426 : struct.success.entrySet()) +- { +- oprot.writeString(_iter426.getKey()); +- oprot.writeString(_iter426.getValue()); +- } +- oprot.writeMapEnd(); +- } +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class describe_token_map_resultTupleSchemeFactory implements SchemeFactory { +- public describe_token_map_resultTupleScheme getScheme() { +- return new describe_token_map_resultTupleScheme(); +- } +- } +- +- private static class describe_token_map_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, describe_token_map_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if 
(struct.isSetIre()) { +- optionals.set(1); +- } +- oprot.writeBitSet(optionals, 2); +- if (struct.isSetSuccess()) { +- { +- oprot.writeI32(struct.success.size()); +- for (Map.Entry _iter427 : struct.success.entrySet()) +- { +- oprot.writeString(_iter427.getKey()); +- oprot.writeString(_iter427.getValue()); +- } +- } +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, describe_token_map_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(2); +- if (incoming.get(0)) { +- { +- org.apache.thrift.protocol.TMap _map428 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); +- struct.success = new HashMap(2*_map428.size); +- for (int _i429 = 0; _i429 < _map428.size; ++_i429) +- { +- String _key430; +- String _val431; +- _key430 = iprot.readString(); +- _val431 = iprot.readString(); +- struct.success.put(_key430, _val431); +- } +- } +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- } +- } +- +- } +- +- public static class describe_partitioner_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("describe_partitioner_args"); +- +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new describe_partitioner_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new describe_partitioner_argsTupleSchemeFactory()); +- } +- +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +-; +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(describe_partitioner_args.class, metaDataMap); +- } +- +- public describe_partitioner_args() { +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public describe_partitioner_args(describe_partitioner_args other) { +- } +- +- public describe_partitioner_args deepCopy() { +- return new describe_partitioner_args(this); +- } +- +- @Override +- public void clear() { +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof describe_partitioner_args) +- return this.equals((describe_partitioner_args)that); +- return false; +- } +- +- public boolean equals(describe_partitioner_args that) { +- if (that == null) +- return false; +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(describe_partitioner_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new 
StringBuilder("describe_partitioner_args("); +- boolean first = true; +- +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class describe_partitioner_argsStandardSchemeFactory implements SchemeFactory { +- public describe_partitioner_argsStandardScheme getScheme() { +- return new describe_partitioner_argsStandardScheme(); +- } +- } +- +- private static class describe_partitioner_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, describe_partitioner_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, describe_partitioner_args struct) 
throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class describe_partitioner_argsTupleSchemeFactory implements SchemeFactory { +- public describe_partitioner_argsTupleScheme getScheme() { +- return new describe_partitioner_argsTupleScheme(); +- } +- } +- +- private static class describe_partitioner_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, describe_partitioner_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, describe_partitioner_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- } +- } +- +- } +- +- public static class describe_partitioner_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("describe_partitioner_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new describe_partitioner_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new describe_partitioner_resultTupleSchemeFactory()); +- } +- +- public String success; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(describe_partitioner_result.class, metaDataMap); +- } +- +- public describe_partitioner_result() { +- } +- +- public describe_partitioner_result( +- String success) +- { +- this(); +- this.success = success; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public describe_partitioner_result(describe_partitioner_result other) { +- if (other.isSetSuccess()) { +- this.success = other.success; +- } +- } +- +- public describe_partitioner_result deepCopy() { +- return new describe_partitioner_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- } +- +- public String getSuccess() { +- return this.success; +- } +- +- public describe_partitioner_result setSuccess(String success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((String)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that 
instanceof describe_partitioner_result) +- return this.equals((describe_partitioner_result)that); +- return false; +- } +- +- public boolean equals(describe_partitioner_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(describe_partitioner_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("describe_partitioner_result("); +- 
boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class describe_partitioner_resultStandardSchemeFactory implements SchemeFactory { +- public describe_partitioner_resultStandardScheme getScheme() { +- return new describe_partitioner_resultStandardScheme(); +- } +- } +- +- private static class describe_partitioner_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, describe_partitioner_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.success = iprot.readString(); +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- 
default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, describe_partitioner_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- oprot.writeString(struct.success); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class describe_partitioner_resultTupleSchemeFactory implements SchemeFactory { +- public describe_partitioner_resultTupleScheme getScheme() { +- return new describe_partitioner_resultTupleScheme(); +- } +- } +- +- private static class describe_partitioner_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, describe_partitioner_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- oprot.writeBitSet(optionals, 1); +- if (struct.isSetSuccess()) { +- oprot.writeString(struct.success); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, describe_partitioner_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(1); +- if (incoming.get(0)) { +- struct.success = iprot.readString(); +- struct.setSuccessIsSet(true); +- } +- } +- } +- +- } +- +- public static class describe_snitch_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("describe_snitch_args"); +- +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new describe_snitch_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new describe_snitch_argsTupleSchemeFactory()); +- } +- +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +-; +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(describe_snitch_args.class, metaDataMap); +- } +- +- public describe_snitch_args() { +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public describe_snitch_args(describe_snitch_args other) { +- } +- +- public describe_snitch_args deepCopy() { +- return new describe_snitch_args(this); +- } +- +- @Override +- public void clear() { +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof describe_snitch_args) +- return this.equals((describe_snitch_args)that); +- return false; +- } +- +- public boolean equals(describe_snitch_args that) { +- if (that == null) +- return false; +- +- return true; +- } +- +- @Override +- public int 
hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(describe_snitch_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("describe_snitch_args("); +- boolean first = true; +- +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class describe_snitch_argsStandardSchemeFactory implements SchemeFactory { +- public describe_snitch_argsStandardScheme getScheme() { +- return new describe_snitch_argsStandardScheme(); +- } +- } +- +- private 
static class describe_snitch_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, describe_snitch_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, describe_snitch_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class describe_snitch_argsTupleSchemeFactory implements SchemeFactory { +- public describe_snitch_argsTupleScheme getScheme() { +- return new describe_snitch_argsTupleScheme(); +- } +- } +- +- private static class describe_snitch_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, describe_snitch_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, describe_snitch_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- } +- } +- +- } +- +- public static class describe_snitch_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("describe_snitch_result"); +- +- private static 
final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new describe_snitch_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new describe_snitch_resultTupleSchemeFactory()); +- } +- +- public String success; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(describe_snitch_result.class, metaDataMap); +- } +- +- public describe_snitch_result() { +- } +- +- public describe_snitch_result( +- String success) +- { +- this(); +- this.success = success; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public describe_snitch_result(describe_snitch_result other) { +- if (other.isSetSuccess()) { +- this.success = other.success; +- } +- } +- +- public describe_snitch_result deepCopy() { +- return new describe_snitch_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- } +- +- public String getSuccess() { +- return this.success; +- } +- +- public describe_snitch_result setSuccess(String success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((String)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof describe_snitch_result) +- return this.equals((describe_snitch_result)that); +- return false; +- } +- +- public boolean equals(describe_snitch_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if 
(this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(describe_snitch_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("describe_snitch_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private 
void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class describe_snitch_resultStandardSchemeFactory implements SchemeFactory { +- public describe_snitch_resultStandardScheme getScheme() { +- return new describe_snitch_resultStandardScheme(); +- } +- } +- +- private static class describe_snitch_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, describe_snitch_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.success = iprot.readString(); +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, describe_snitch_result struct) throws 
org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- oprot.writeString(struct.success); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class describe_snitch_resultTupleSchemeFactory implements SchemeFactory { +- public describe_snitch_resultTupleScheme getScheme() { +- return new describe_snitch_resultTupleScheme(); +- } +- } +- +- private static class describe_snitch_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, describe_snitch_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- oprot.writeBitSet(optionals, 1); +- if (struct.isSetSuccess()) { +- oprot.writeString(struct.success); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, describe_snitch_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(1); +- if (incoming.get(0)) { +- struct.success = iprot.readString(); +- struct.setSuccessIsSet(true); +- } +- } +- } +- +- } +- +- public static class describe_keyspace_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("describe_keyspace_args"); +- +- private static final org.apache.thrift.protocol.TField KEYSPACE_FIELD_DESC = new org.apache.thrift.protocol.TField("keyspace", org.apache.thrift.protocol.TType.STRING, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new 
describe_keyspace_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new describe_keyspace_argsTupleSchemeFactory()); +- } +- +- public String keyspace; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- KEYSPACE((short)1, "keyspace"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // KEYSPACE +- return KEYSPACE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.KEYSPACE, new org.apache.thrift.meta_data.FieldMetaData("keyspace", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(describe_keyspace_args.class, metaDataMap); +- } +- +- public describe_keyspace_args() { +- } +- +- public describe_keyspace_args( +- String keyspace) +- { +- this(); +- this.keyspace = keyspace; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public describe_keyspace_args(describe_keyspace_args other) { +- if (other.isSetKeyspace()) { +- this.keyspace = other.keyspace; +- } +- } +- +- public describe_keyspace_args deepCopy() { +- return new describe_keyspace_args(this); +- } +- +- @Override +- public void clear() { +- this.keyspace = null; +- } +- +- public String getKeyspace() { +- return this.keyspace; +- } +- +- public describe_keyspace_args setKeyspace(String keyspace) { +- this.keyspace = keyspace; +- return this; +- } +- +- public void unsetKeyspace() { +- this.keyspace = null; +- } +- +- /** Returns true if field keyspace is set (has been assigned a value) and false otherwise */ +- public boolean isSetKeyspace() { +- return this.keyspace != null; +- } +- +- public void setKeyspaceIsSet(boolean value) { +- if (!value) { +- this.keyspace = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case KEYSPACE: +- if (value == null) { +- unsetKeyspace(); +- } else { +- setKeyspace((String)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case KEYSPACE: +- return getKeyspace(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case KEYSPACE: +- return isSetKeyspace(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof describe_keyspace_args) +- return this.equals((describe_keyspace_args)that); +- return false; +- } +- +- public boolean equals(describe_keyspace_args that) { +- if (that == null) +- return false; +- +- boolean this_present_keyspace = true && this.isSetKeyspace(); +- boolean that_present_keyspace = true && 
that.isSetKeyspace(); +- if (this_present_keyspace || that_present_keyspace) { +- if (!(this_present_keyspace && that_present_keyspace)) +- return false; +- if (!this.keyspace.equals(that.keyspace)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_keyspace = true && (isSetKeyspace()); +- builder.append(present_keyspace); +- if (present_keyspace) +- builder.append(keyspace); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(describe_keyspace_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetKeyspace()).compareTo(other.isSetKeyspace()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKeyspace()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.keyspace, other.keyspace); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("describe_keyspace_args("); +- boolean first = true; +- +- sb.append("keyspace:"); +- if (this.keyspace == null) { +- sb.append("null"); +- } else { +- sb.append(this.keyspace); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if 
(keyspace == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'keyspace' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class describe_keyspace_argsStandardSchemeFactory implements SchemeFactory { +- public describe_keyspace_argsStandardScheme getScheme() { +- return new describe_keyspace_argsStandardScheme(); +- } +- } +- +- private static class describe_keyspace_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, describe_keyspace_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // KEYSPACE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.keyspace = iprot.readString(); +- struct.setKeyspaceIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of 
primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, describe_keyspace_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.keyspace != null) { +- oprot.writeFieldBegin(KEYSPACE_FIELD_DESC); +- oprot.writeString(struct.keyspace); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class describe_keyspace_argsTupleSchemeFactory implements SchemeFactory { +- public describe_keyspace_argsTupleScheme getScheme() { +- return new describe_keyspace_argsTupleScheme(); +- } +- } +- +- private static class describe_keyspace_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, describe_keyspace_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeString(struct.keyspace); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, describe_keyspace_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.keyspace = iprot.readString(); +- struct.setKeyspaceIsSet(true); +- } +- } +- +- } +- +- public static class describe_keyspace_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("describe_keyspace_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); +- private static final org.apache.thrift.protocol.TField NFE_FIELD_DESC = new org.apache.thrift.protocol.TField("nfe", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final 
org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new describe_keyspace_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new describe_keyspace_resultTupleSchemeFactory()); +- } +- +- public KsDef success; // required +- public NotFoundException nfe; // required +- public InvalidRequestException ire; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- NFE((short)1, "nfe"), +- IRE((short)2, "ire"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // NFE +- return NFE; +- case 2: // IRE +- return IRE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, KsDef.class))); +- tmpMap.put(_Fields.NFE, new org.apache.thrift.meta_data.FieldMetaData("nfe", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(describe_keyspace_result.class, metaDataMap); +- } +- +- public describe_keyspace_result() { +- } +- +- public describe_keyspace_result( +- KsDef success, +- NotFoundException nfe, +- InvalidRequestException ire) +- { +- this(); +- this.success = success; +- this.nfe = nfe; +- this.ire = ire; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public describe_keyspace_result(describe_keyspace_result other) { +- if (other.isSetSuccess()) { +- this.success = new KsDef(other.success); +- } +- if (other.isSetNfe()) { +- this.nfe = new NotFoundException(other.nfe); +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- } +- +- public describe_keyspace_result deepCopy() { +- return new describe_keyspace_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.nfe = null; +- this.ire = null; +- } +- +- public KsDef getSuccess() { +- return this.success; +- } +- +- public describe_keyspace_result setSuccess(KsDef success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public NotFoundException getNfe() { +- return this.nfe; +- } +- +- public describe_keyspace_result setNfe(NotFoundException nfe) { +- this.nfe = nfe; +- return this; +- } +- +- public void unsetNfe() { +- this.nfe = null; +- } +- +- /** Returns true if field nfe is set (has been assigned a value) and false otherwise */ +- public boolean isSetNfe() { +- return this.nfe != null; +- } +- +- public void setNfeIsSet(boolean value) { +- if (!value) { +- this.nfe = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public describe_keyspace_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if 
(!value) { +- this.ire = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((KsDef)value); +- } +- break; +- +- case NFE: +- if (value == null) { +- unsetNfe(); +- } else { +- setNfe((NotFoundException)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case NFE: +- return getNfe(); +- +- case IRE: +- return getIre(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case NFE: +- return isSetNfe(); +- case IRE: +- return isSetIre(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof describe_keyspace_result) +- return this.equals((describe_keyspace_result)that); +- return false; +- } +- +- public boolean equals(describe_keyspace_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_nfe = true && this.isSetNfe(); +- boolean that_present_nfe = true && that.isSetNfe(); +- if (this_present_nfe || that_present_nfe) { +- if (!(this_present_nfe && that_present_nfe)) +- 
return false; +- if (!this.nfe.equals(that.nfe)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_nfe = true && (isSetNfe()); +- builder.append(present_nfe); +- if (present_nfe) +- builder.append(nfe); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(describe_keyspace_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetNfe()).compareTo(other.isSetNfe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetNfe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nfe, other.nfe); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("describe_keyspace_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("nfe:"); +- if (this.nfe == null) { +- sb.append("null"); +- } else { +- sb.append(this.nfe); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- if (success != null) { +- success.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } 
catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class describe_keyspace_resultStandardSchemeFactory implements SchemeFactory { +- public describe_keyspace_resultStandardScheme getScheme() { +- return new describe_keyspace_resultStandardScheme(); +- } +- } +- +- private static class describe_keyspace_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, describe_keyspace_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.success = new KsDef(); +- struct.success.read(iprot); +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // NFE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.nfe = new NotFoundException(); +- struct.nfe.read(iprot); +- struct.setNfeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void 
write(org.apache.thrift.protocol.TProtocol oprot, describe_keyspace_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- struct.success.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.nfe != null) { +- oprot.writeFieldBegin(NFE_FIELD_DESC); +- struct.nfe.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class describe_keyspace_resultTupleSchemeFactory implements SchemeFactory { +- public describe_keyspace_resultTupleScheme getScheme() { +- return new describe_keyspace_resultTupleScheme(); +- } +- } +- +- private static class describe_keyspace_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, describe_keyspace_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetNfe()) { +- optionals.set(1); +- } +- if (struct.isSetIre()) { +- optionals.set(2); +- } +- oprot.writeBitSet(optionals, 3); +- if (struct.isSetSuccess()) { +- struct.success.write(oprot); +- } +- if (struct.isSetNfe()) { +- struct.nfe.write(oprot); +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, describe_keyspace_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(3); +- if (incoming.get(0)) { +- struct.success = new KsDef(); +- struct.success.read(iprot); +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- 
struct.nfe = new NotFoundException(); +- struct.nfe.read(iprot); +- struct.setNfeIsSet(true); +- } +- if (incoming.get(2)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- } +- } +- +- } +- +- public static class describe_splits_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("describe_splits_args"); +- +- private static final org.apache.thrift.protocol.TField CF_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("cfName", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField START_TOKEN_FIELD_DESC = new org.apache.thrift.protocol.TField("start_token", org.apache.thrift.protocol.TType.STRING, (short)2); +- private static final org.apache.thrift.protocol.TField END_TOKEN_FIELD_DESC = new org.apache.thrift.protocol.TField("end_token", org.apache.thrift.protocol.TType.STRING, (short)3); +- private static final org.apache.thrift.protocol.TField KEYS_PER_SPLIT_FIELD_DESC = new org.apache.thrift.protocol.TField("keys_per_split", org.apache.thrift.protocol.TType.I32, (short)4); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new describe_splits_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new describe_splits_argsTupleSchemeFactory()); +- } +- +- public String cfName; // required +- public String start_token; // required +- public String end_token; // required +- public int keys_per_split; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- CF_NAME((short)1, "cfName"), +- START_TOKEN((short)2, "start_token"), +- END_TOKEN((short)3, "end_token"), +- KEYS_PER_SPLIT((short)4, "keys_per_split"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // CF_NAME +- return CF_NAME; +- case 2: // START_TOKEN +- return START_TOKEN; +- case 3: // END_TOKEN +- return END_TOKEN; +- case 4: // KEYS_PER_SPLIT +- return KEYS_PER_SPLIT; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private static final int __KEYS_PER_SPLIT_ISSET_ID = 0; +- private byte __isset_bitfield = 0; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.CF_NAME, new org.apache.thrift.meta_data.FieldMetaData("cfName", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.START_TOKEN, new org.apache.thrift.meta_data.FieldMetaData("start_token", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.END_TOKEN, new org.apache.thrift.meta_data.FieldMetaData("end_token", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.KEYS_PER_SPLIT, new org.apache.thrift.meta_data.FieldMetaData("keys_per_split", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(describe_splits_args.class, metaDataMap); +- } +- +- public describe_splits_args() { +- } +- +- public describe_splits_args( +- 
String cfName, +- String start_token, +- String end_token, +- int keys_per_split) +- { +- this(); +- this.cfName = cfName; +- this.start_token = start_token; +- this.end_token = end_token; +- this.keys_per_split = keys_per_split; +- setKeys_per_splitIsSet(true); +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public describe_splits_args(describe_splits_args other) { +- __isset_bitfield = other.__isset_bitfield; +- if (other.isSetCfName()) { +- this.cfName = other.cfName; +- } +- if (other.isSetStart_token()) { +- this.start_token = other.start_token; +- } +- if (other.isSetEnd_token()) { +- this.end_token = other.end_token; +- } +- this.keys_per_split = other.keys_per_split; +- } +- +- public describe_splits_args deepCopy() { +- return new describe_splits_args(this); +- } +- +- @Override +- public void clear() { +- this.cfName = null; +- this.start_token = null; +- this.end_token = null; +- setKeys_per_splitIsSet(false); +- this.keys_per_split = 0; +- } +- +- public String getCfName() { +- return this.cfName; +- } +- +- public describe_splits_args setCfName(String cfName) { +- this.cfName = cfName; +- return this; +- } +- +- public void unsetCfName() { +- this.cfName = null; +- } +- +- /** Returns true if field cfName is set (has been assigned a value) and false otherwise */ +- public boolean isSetCfName() { +- return this.cfName != null; +- } +- +- public void setCfNameIsSet(boolean value) { +- if (!value) { +- this.cfName = null; +- } +- } +- +- public String getStart_token() { +- return this.start_token; +- } +- +- public describe_splits_args setStart_token(String start_token) { +- this.start_token = start_token; +- return this; +- } +- +- public void unsetStart_token() { +- this.start_token = null; +- } +- +- /** Returns true if field start_token is set (has been assigned a value) and false otherwise */ +- public boolean isSetStart_token() { +- return this.start_token != null; +- } +- +- public void setStart_tokenIsSet(boolean value) { +- if 
(!value) { +- this.start_token = null; +- } +- } +- +- public String getEnd_token() { +- return this.end_token; +- } +- +- public describe_splits_args setEnd_token(String end_token) { +- this.end_token = end_token; +- return this; +- } +- +- public void unsetEnd_token() { +- this.end_token = null; +- } +- +- /** Returns true if field end_token is set (has been assigned a value) and false otherwise */ +- public boolean isSetEnd_token() { +- return this.end_token != null; +- } +- +- public void setEnd_tokenIsSet(boolean value) { +- if (!value) { +- this.end_token = null; +- } +- } +- +- public int getKeys_per_split() { +- return this.keys_per_split; +- } +- +- public describe_splits_args setKeys_per_split(int keys_per_split) { +- this.keys_per_split = keys_per_split; +- setKeys_per_splitIsSet(true); +- return this; +- } +- +- public void unsetKeys_per_split() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __KEYS_PER_SPLIT_ISSET_ID); +- } +- +- /** Returns true if field keys_per_split is set (has been assigned a value) and false otherwise */ +- public boolean isSetKeys_per_split() { +- return EncodingUtils.testBit(__isset_bitfield, __KEYS_PER_SPLIT_ISSET_ID); +- } +- +- public void setKeys_per_splitIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __KEYS_PER_SPLIT_ISSET_ID, value); +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case CF_NAME: +- if (value == null) { +- unsetCfName(); +- } else { +- setCfName((String)value); +- } +- break; +- +- case START_TOKEN: +- if (value == null) { +- unsetStart_token(); +- } else { +- setStart_token((String)value); +- } +- break; +- +- case END_TOKEN: +- if (value == null) { +- unsetEnd_token(); +- } else { +- setEnd_token((String)value); +- } +- break; +- +- case KEYS_PER_SPLIT: +- if (value == null) { +- unsetKeys_per_split(); +- } else { +- setKeys_per_split((Integer)value); +- } +- break; +- +- } +- } +- +- public Object 
getFieldValue(_Fields field) { +- switch (field) { +- case CF_NAME: +- return getCfName(); +- +- case START_TOKEN: +- return getStart_token(); +- +- case END_TOKEN: +- return getEnd_token(); +- +- case KEYS_PER_SPLIT: +- return Integer.valueOf(getKeys_per_split()); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case CF_NAME: +- return isSetCfName(); +- case START_TOKEN: +- return isSetStart_token(); +- case END_TOKEN: +- return isSetEnd_token(); +- case KEYS_PER_SPLIT: +- return isSetKeys_per_split(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof describe_splits_args) +- return this.equals((describe_splits_args)that); +- return false; +- } +- +- public boolean equals(describe_splits_args that) { +- if (that == null) +- return false; +- +- boolean this_present_cfName = true && this.isSetCfName(); +- boolean that_present_cfName = true && that.isSetCfName(); +- if (this_present_cfName || that_present_cfName) { +- if (!(this_present_cfName && that_present_cfName)) +- return false; +- if (!this.cfName.equals(that.cfName)) +- return false; +- } +- +- boolean this_present_start_token = true && this.isSetStart_token(); +- boolean that_present_start_token = true && that.isSetStart_token(); +- if (this_present_start_token || that_present_start_token) { +- if (!(this_present_start_token && that_present_start_token)) +- return false; +- if (!this.start_token.equals(that.start_token)) +- return false; +- } +- +- boolean this_present_end_token = true && this.isSetEnd_token(); +- boolean that_present_end_token = true && that.isSetEnd_token(); +- if (this_present_end_token || that_present_end_token) { +- if 
(!(this_present_end_token && that_present_end_token)) +- return false; +- if (!this.end_token.equals(that.end_token)) +- return false; +- } +- +- boolean this_present_keys_per_split = true; +- boolean that_present_keys_per_split = true; +- if (this_present_keys_per_split || that_present_keys_per_split) { +- if (!(this_present_keys_per_split && that_present_keys_per_split)) +- return false; +- if (this.keys_per_split != that.keys_per_split) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_cfName = true && (isSetCfName()); +- builder.append(present_cfName); +- if (present_cfName) +- builder.append(cfName); +- +- boolean present_start_token = true && (isSetStart_token()); +- builder.append(present_start_token); +- if (present_start_token) +- builder.append(start_token); +- +- boolean present_end_token = true && (isSetEnd_token()); +- builder.append(present_end_token); +- if (present_end_token) +- builder.append(end_token); +- +- boolean present_keys_per_split = true; +- builder.append(present_keys_per_split); +- if (present_keys_per_split) +- builder.append(keys_per_split); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(describe_splits_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetCfName()).compareTo(other.isSetCfName()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetCfName()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cfName, other.cfName); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetStart_token()).compareTo(other.isSetStart_token()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetStart_token()) { +- lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.start_token, other.start_token); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetEnd_token()).compareTo(other.isSetEnd_token()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetEnd_token()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.end_token, other.end_token); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetKeys_per_split()).compareTo(other.isSetKeys_per_split()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKeys_per_split()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.keys_per_split, other.keys_per_split); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("describe_splits_args("); +- boolean first = true; +- +- sb.append("cfName:"); +- if (this.cfName == null) { +- sb.append("null"); +- } else { +- sb.append(this.cfName); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("start_token:"); +- if (this.start_token == null) { +- sb.append("null"); +- } else { +- sb.append(this.start_token); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("end_token:"); +- if (this.end_token == null) { +- sb.append("null"); +- } else { +- sb.append(this.end_token); +- } +- first = false; +- if (!first) sb.append(", "); +- 
sb.append("keys_per_split:"); +- sb.append(this.keys_per_split); +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (cfName == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'cfName' was not present! Struct: " + toString()); +- } +- if (start_token == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'start_token' was not present! Struct: " + toString()); +- } +- if (end_token == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'end_token' was not present! Struct: " + toString()); +- } +- // alas, we cannot check 'keys_per_split' because it's a primitive and you chose the non-beans generator. +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+- __isset_bitfield = 0; +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class describe_splits_argsStandardSchemeFactory implements SchemeFactory { +- public describe_splits_argsStandardScheme getScheme() { +- return new describe_splits_argsStandardScheme(); +- } +- } +- +- private static class describe_splits_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, describe_splits_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // CF_NAME +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.cfName = iprot.readString(); +- struct.setCfNameIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // START_TOKEN +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.start_token = iprot.readString(); +- struct.setStart_tokenIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // END_TOKEN +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.end_token = iprot.readString(); +- struct.setEnd_tokenIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // KEYS_PER_SPLIT +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.keys_per_split = iprot.readI32(); +- struct.setKeys_per_splitIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- if (!struct.isSetKeys_per_split()) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'keys_per_split' was not found in serialized data! Struct: " + toString()); +- } +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, describe_splits_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.cfName != null) { +- oprot.writeFieldBegin(CF_NAME_FIELD_DESC); +- oprot.writeString(struct.cfName); +- oprot.writeFieldEnd(); +- } +- if (struct.start_token != null) { +- oprot.writeFieldBegin(START_TOKEN_FIELD_DESC); +- oprot.writeString(struct.start_token); +- oprot.writeFieldEnd(); +- } +- if (struct.end_token != null) { +- oprot.writeFieldBegin(END_TOKEN_FIELD_DESC); +- oprot.writeString(struct.end_token); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldBegin(KEYS_PER_SPLIT_FIELD_DESC); +- oprot.writeI32(struct.keys_per_split); +- oprot.writeFieldEnd(); +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class describe_splits_argsTupleSchemeFactory implements SchemeFactory { +- public describe_splits_argsTupleScheme getScheme() { +- return new describe_splits_argsTupleScheme(); +- } +- } +- +- private static class describe_splits_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, describe_splits_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeString(struct.cfName); +- oprot.writeString(struct.start_token); +- oprot.writeString(struct.end_token); +- oprot.writeI32(struct.keys_per_split); 
+- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, describe_splits_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.cfName = iprot.readString(); +- struct.setCfNameIsSet(true); +- struct.start_token = iprot.readString(); +- struct.setStart_tokenIsSet(true); +- struct.end_token = iprot.readString(); +- struct.setEnd_tokenIsSet(true); +- struct.keys_per_split = iprot.readI32(); +- struct.setKeys_per_splitIsSet(true); +- } +- } +- +- } +- +- public static class describe_splits_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("describe_splits_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new describe_splits_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new describe_splits_resultTupleSchemeFactory()); +- } +- +- public List success; // required +- public InvalidRequestException ire; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(describe_splits_result.class, metaDataMap); +- } +- +- public describe_splits_result() { +- } +- +- public describe_splits_result( +- List success, +- InvalidRequestException ire) +- { +- this(); +- this.success = success; +- this.ire = ire; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public describe_splits_result(describe_splits_result other) { +- if (other.isSetSuccess()) { +- List __this__success = new ArrayList(other.success); +- this.success = __this__success; +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- } +- +- public describe_splits_result deepCopy() { +- return new describe_splits_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- } +- +- public int getSuccessSize() { +- return (this.success == null) ? 0 : this.success.size(); +- } +- +- public java.util.Iterator getSuccessIterator() { +- return (this.success == null) ? null : this.success.iterator(); +- } +- +- public void addToSuccess(String elem) { +- if (this.success == null) { +- this.success = new ArrayList(); +- } +- this.success.add(elem); +- } +- +- public List getSuccess() { +- return this.success; +- } +- +- public describe_splits_result setSuccess(List success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public describe_splits_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- 
unsetSuccess(); +- } else { +- setSuccess((List)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof describe_splits_result) +- return this.equals((describe_splits_result)that); +- return false; +- } +- +- public boolean equals(describe_splits_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true 
&& (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(describe_splits_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("describe_splits_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws 
org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class describe_splits_resultStandardSchemeFactory implements SchemeFactory { +- public describe_splits_resultStandardScheme getScheme() { +- return new describe_splits_resultStandardScheme(); +- } +- } +- +- private static class describe_splits_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, describe_splits_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list432 = iprot.readListBegin(); +- struct.success = new ArrayList(_list432.size); +- for (int _i433 = 0; _i433 < _list432.size; ++_i433) +- { +- String _elem434; +- _elem434 = iprot.readString(); +- struct.success.add(_elem434); +- } +- iprot.readListEnd(); +- } +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- 
if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, describe_splits_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); +- for (String _iter435 : struct.success) +- { +- oprot.writeString(_iter435); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class describe_splits_resultTupleSchemeFactory implements SchemeFactory { +- public describe_splits_resultTupleScheme getScheme() { +- return new describe_splits_resultTupleScheme(); +- } +- } +- +- private static class describe_splits_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, describe_splits_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetIre()) { +- optionals.set(1); +- } +- oprot.writeBitSet(optionals, 2); +- if 
(struct.isSetSuccess()) { +- { +- oprot.writeI32(struct.success.size()); +- for (String _iter436 : struct.success) +- { +- oprot.writeString(_iter436); +- } +- } +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, describe_splits_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(2); +- if (incoming.get(0)) { +- { +- org.apache.thrift.protocol.TList _list437 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); +- struct.success = new ArrayList(_list437.size); +- for (int _i438 = 0; _i438 < _list437.size; ++_i438) +- { +- String _elem439; +- _elem439 = iprot.readString(); +- struct.success.add(_elem439); +- } +- } +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- } +- } +- +- } +- +- public static class trace_next_query_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("trace_next_query_args"); +- +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new trace_next_query_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new trace_next_query_argsTupleSchemeFactory()); +- } +- +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +-; +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(trace_next_query_args.class, metaDataMap); +- } +- +- public trace_next_query_args() { +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public trace_next_query_args(trace_next_query_args other) { +- } +- +- public trace_next_query_args deepCopy() { +- return new trace_next_query_args(this); +- } +- +- @Override +- public void clear() { +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof trace_next_query_args) +- return this.equals((trace_next_query_args)that); +- return false; +- } +- +- public boolean equals(trace_next_query_args that) { +- if (that == null) +- return false; +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(trace_next_query_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("trace_next_query_args("); 
+- boolean first = true; +- +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class trace_next_query_argsStandardSchemeFactory implements SchemeFactory { +- public trace_next_query_argsStandardScheme getScheme() { +- return new trace_next_query_argsStandardScheme(); +- } +- } +- +- private static class trace_next_query_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, trace_next_query_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, trace_next_query_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- 
oprot.writeStructBegin(STRUCT_DESC); +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class trace_next_query_argsTupleSchemeFactory implements SchemeFactory { +- public trace_next_query_argsTupleScheme getScheme() { +- return new trace_next_query_argsTupleScheme(); +- } +- } +- +- private static class trace_next_query_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, trace_next_query_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, trace_next_query_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- } +- } +- +- } +- +- public static class trace_next_query_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("trace_next_query_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new trace_next_query_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new trace_next_query_resultTupleSchemeFactory()); +- } +- +- public ByteBuffer success; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(trace_next_query_result.class, metaDataMap); +- } +- +- public trace_next_query_result() { +- } +- +- public trace_next_query_result( +- ByteBuffer success) +- { +- this(); +- this.success = success; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public trace_next_query_result(trace_next_query_result other) { +- if (other.isSetSuccess()) { +- this.success = org.apache.thrift.TBaseHelper.copyBinary(other.success); +-; +- } +- } +- +- public trace_next_query_result deepCopy() { +- return new trace_next_query_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- } +- +- public byte[] getSuccess() { +- setSuccess(org.apache.thrift.TBaseHelper.rightSize(success)); +- return success == null ? null : success.array(); +- } +- +- public ByteBuffer bufferForSuccess() { +- return success; +- } +- +- public trace_next_query_result setSuccess(byte[] success) { +- setSuccess(success == null ? (ByteBuffer)null : ByteBuffer.wrap(success)); +- return this; +- } +- +- public trace_next_query_result setSuccess(ByteBuffer success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((ByteBuffer)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and 
false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof trace_next_query_result) +- return this.equals((trace_next_query_result)that); +- return false; +- } +- +- public boolean equals(trace_next_query_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(trace_next_query_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- 
schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("trace_next_query_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.success, sb); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class trace_next_query_resultStandardSchemeFactory implements SchemeFactory { +- public trace_next_query_resultStandardScheme getScheme() { +- return new trace_next_query_resultStandardScheme(); +- } +- } +- +- private static class trace_next_query_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, trace_next_query_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if 
(schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.success = iprot.readBinary(); +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, trace_next_query_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- oprot.writeBinary(struct.success); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class trace_next_query_resultTupleSchemeFactory implements SchemeFactory { +- public trace_next_query_resultTupleScheme getScheme() { +- return new trace_next_query_resultTupleScheme(); +- } +- } +- +- private static class trace_next_query_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, trace_next_query_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- oprot.writeBitSet(optionals, 1); +- if (struct.isSetSuccess()) { +- oprot.writeBinary(struct.success); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, trace_next_query_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = 
iprot.readBitSet(1); +- if (incoming.get(0)) { +- struct.success = iprot.readBinary(); +- struct.setSuccessIsSet(true); +- } +- } +- } +- +- } +- +- public static class describe_splits_ex_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("describe_splits_ex_args"); +- +- private static final org.apache.thrift.protocol.TField CF_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("cfName", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField START_TOKEN_FIELD_DESC = new org.apache.thrift.protocol.TField("start_token", org.apache.thrift.protocol.TType.STRING, (short)2); +- private static final org.apache.thrift.protocol.TField END_TOKEN_FIELD_DESC = new org.apache.thrift.protocol.TField("end_token", org.apache.thrift.protocol.TType.STRING, (short)3); +- private static final org.apache.thrift.protocol.TField KEYS_PER_SPLIT_FIELD_DESC = new org.apache.thrift.protocol.TField("keys_per_split", org.apache.thrift.protocol.TType.I32, (short)4); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new describe_splits_ex_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new describe_splits_ex_argsTupleSchemeFactory()); +- } +- +- public String cfName; // required +- public String start_token; // required +- public String end_token; // required +- public int keys_per_split; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- CF_NAME((short)1, "cfName"), +- START_TOKEN((short)2, "start_token"), +- END_TOKEN((short)3, "end_token"), +- KEYS_PER_SPLIT((short)4, "keys_per_split"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // CF_NAME +- return CF_NAME; +- case 2: // START_TOKEN +- return START_TOKEN; +- case 3: // END_TOKEN +- return END_TOKEN; +- case 4: // KEYS_PER_SPLIT +- return KEYS_PER_SPLIT; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private static final int __KEYS_PER_SPLIT_ISSET_ID = 0; +- private byte __isset_bitfield = 0; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.CF_NAME, new org.apache.thrift.meta_data.FieldMetaData("cfName", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.START_TOKEN, new org.apache.thrift.meta_data.FieldMetaData("start_token", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.END_TOKEN, new org.apache.thrift.meta_data.FieldMetaData("end_token", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.KEYS_PER_SPLIT, new org.apache.thrift.meta_data.FieldMetaData("keys_per_split", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(describe_splits_ex_args.class, metaDataMap); +- } +- +- public describe_splits_ex_args() { +- } +- +- public 
describe_splits_ex_args( +- String cfName, +- String start_token, +- String end_token, +- int keys_per_split) +- { +- this(); +- this.cfName = cfName; +- this.start_token = start_token; +- this.end_token = end_token; +- this.keys_per_split = keys_per_split; +- setKeys_per_splitIsSet(true); +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public describe_splits_ex_args(describe_splits_ex_args other) { +- __isset_bitfield = other.__isset_bitfield; +- if (other.isSetCfName()) { +- this.cfName = other.cfName; +- } +- if (other.isSetStart_token()) { +- this.start_token = other.start_token; +- } +- if (other.isSetEnd_token()) { +- this.end_token = other.end_token; +- } +- this.keys_per_split = other.keys_per_split; +- } +- +- public describe_splits_ex_args deepCopy() { +- return new describe_splits_ex_args(this); +- } +- +- @Override +- public void clear() { +- this.cfName = null; +- this.start_token = null; +- this.end_token = null; +- setKeys_per_splitIsSet(false); +- this.keys_per_split = 0; +- } +- +- public String getCfName() { +- return this.cfName; +- } +- +- public describe_splits_ex_args setCfName(String cfName) { +- this.cfName = cfName; +- return this; +- } +- +- public void unsetCfName() { +- this.cfName = null; +- } +- +- /** Returns true if field cfName is set (has been assigned a value) and false otherwise */ +- public boolean isSetCfName() { +- return this.cfName != null; +- } +- +- public void setCfNameIsSet(boolean value) { +- if (!value) { +- this.cfName = null; +- } +- } +- +- public String getStart_token() { +- return this.start_token; +- } +- +- public describe_splits_ex_args setStart_token(String start_token) { +- this.start_token = start_token; +- return this; +- } +- +- public void unsetStart_token() { +- this.start_token = null; +- } +- +- /** Returns true if field start_token is set (has been assigned a value) and false otherwise */ +- public boolean isSetStart_token() { +- return this.start_token != null; +- } +- +- public void 
setStart_tokenIsSet(boolean value) { +- if (!value) { +- this.start_token = null; +- } +- } +- +- public String getEnd_token() { +- return this.end_token; +- } +- +- public describe_splits_ex_args setEnd_token(String end_token) { +- this.end_token = end_token; +- return this; +- } +- +- public void unsetEnd_token() { +- this.end_token = null; +- } +- +- /** Returns true if field end_token is set (has been assigned a value) and false otherwise */ +- public boolean isSetEnd_token() { +- return this.end_token != null; +- } +- +- public void setEnd_tokenIsSet(boolean value) { +- if (!value) { +- this.end_token = null; +- } +- } +- +- public int getKeys_per_split() { +- return this.keys_per_split; +- } +- +- public describe_splits_ex_args setKeys_per_split(int keys_per_split) { +- this.keys_per_split = keys_per_split; +- setKeys_per_splitIsSet(true); +- return this; +- } +- +- public void unsetKeys_per_split() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __KEYS_PER_SPLIT_ISSET_ID); +- } +- +- /** Returns true if field keys_per_split is set (has been assigned a value) and false otherwise */ +- public boolean isSetKeys_per_split() { +- return EncodingUtils.testBit(__isset_bitfield, __KEYS_PER_SPLIT_ISSET_ID); +- } +- +- public void setKeys_per_splitIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __KEYS_PER_SPLIT_ISSET_ID, value); +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case CF_NAME: +- if (value == null) { +- unsetCfName(); +- } else { +- setCfName((String)value); +- } +- break; +- +- case START_TOKEN: +- if (value == null) { +- unsetStart_token(); +- } else { +- setStart_token((String)value); +- } +- break; +- +- case END_TOKEN: +- if (value == null) { +- unsetEnd_token(); +- } else { +- setEnd_token((String)value); +- } +- break; +- +- case KEYS_PER_SPLIT: +- if (value == null) { +- unsetKeys_per_split(); +- } else { +- setKeys_per_split((Integer)value); +- } +- 
break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case CF_NAME: +- return getCfName(); +- +- case START_TOKEN: +- return getStart_token(); +- +- case END_TOKEN: +- return getEnd_token(); +- +- case KEYS_PER_SPLIT: +- return Integer.valueOf(getKeys_per_split()); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case CF_NAME: +- return isSetCfName(); +- case START_TOKEN: +- return isSetStart_token(); +- case END_TOKEN: +- return isSetEnd_token(); +- case KEYS_PER_SPLIT: +- return isSetKeys_per_split(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof describe_splits_ex_args) +- return this.equals((describe_splits_ex_args)that); +- return false; +- } +- +- public boolean equals(describe_splits_ex_args that) { +- if (that == null) +- return false; +- +- boolean this_present_cfName = true && this.isSetCfName(); +- boolean that_present_cfName = true && that.isSetCfName(); +- if (this_present_cfName || that_present_cfName) { +- if (!(this_present_cfName && that_present_cfName)) +- return false; +- if (!this.cfName.equals(that.cfName)) +- return false; +- } +- +- boolean this_present_start_token = true && this.isSetStart_token(); +- boolean that_present_start_token = true && that.isSetStart_token(); +- if (this_present_start_token || that_present_start_token) { +- if (!(this_present_start_token && that_present_start_token)) +- return false; +- if (!this.start_token.equals(that.start_token)) +- return false; +- } +- +- boolean this_present_end_token = true && this.isSetEnd_token(); +- boolean that_present_end_token = true && that.isSetEnd_token(); +- if 
(this_present_end_token || that_present_end_token) { +- if (!(this_present_end_token && that_present_end_token)) +- return false; +- if (!this.end_token.equals(that.end_token)) +- return false; +- } +- +- boolean this_present_keys_per_split = true; +- boolean that_present_keys_per_split = true; +- if (this_present_keys_per_split || that_present_keys_per_split) { +- if (!(this_present_keys_per_split && that_present_keys_per_split)) +- return false; +- if (this.keys_per_split != that.keys_per_split) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_cfName = true && (isSetCfName()); +- builder.append(present_cfName); +- if (present_cfName) +- builder.append(cfName); +- +- boolean present_start_token = true && (isSetStart_token()); +- builder.append(present_start_token); +- if (present_start_token) +- builder.append(start_token); +- +- boolean present_end_token = true && (isSetEnd_token()); +- builder.append(present_end_token); +- if (present_end_token) +- builder.append(end_token); +- +- boolean present_keys_per_split = true; +- builder.append(present_keys_per_split); +- if (present_keys_per_split) +- builder.append(keys_per_split); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(describe_splits_ex_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetCfName()).compareTo(other.isSetCfName()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetCfName()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cfName, other.cfName); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetStart_token()).compareTo(other.isSetStart_token()); +- if (lastComparison != 0) { +- return lastComparison; 
+- } +- if (isSetStart_token()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.start_token, other.start_token); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetEnd_token()).compareTo(other.isSetEnd_token()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetEnd_token()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.end_token, other.end_token); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetKeys_per_split()).compareTo(other.isSetKeys_per_split()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKeys_per_split()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.keys_per_split, other.keys_per_split); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("describe_splits_ex_args("); +- boolean first = true; +- +- sb.append("cfName:"); +- if (this.cfName == null) { +- sb.append("null"); +- } else { +- sb.append(this.cfName); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("start_token:"); +- if (this.start_token == null) { +- sb.append("null"); +- } else { +- sb.append(this.start_token); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("end_token:"); +- if (this.end_token == null) { +- sb.append("null"); +- } else { +- sb.append(this.end_token); +- } +- 
first = false; +- if (!first) sb.append(", "); +- sb.append("keys_per_split:"); +- sb.append(this.keys_per_split); +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (cfName == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'cfName' was not present! Struct: " + toString()); +- } +- if (start_token == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'start_token' was not present! Struct: " + toString()); +- } +- if (end_token == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'end_token' was not present! Struct: " + toString()); +- } +- // alas, we cannot check 'keys_per_split' because it's a primitive and you chose the non-beans generator. +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+- __isset_bitfield = 0; +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class describe_splits_ex_argsStandardSchemeFactory implements SchemeFactory { +- public describe_splits_ex_argsStandardScheme getScheme() { +- return new describe_splits_ex_argsStandardScheme(); +- } +- } +- +- private static class describe_splits_ex_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, describe_splits_ex_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // CF_NAME +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.cfName = iprot.readString(); +- struct.setCfNameIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // START_TOKEN +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.start_token = iprot.readString(); +- struct.setStart_tokenIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // END_TOKEN +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.end_token = iprot.readString(); +- struct.setEnd_tokenIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // KEYS_PER_SPLIT +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.keys_per_split = iprot.readI32(); +- struct.setKeys_per_splitIsSet(true); +- } else { +- 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- if (!struct.isSetKeys_per_split()) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'keys_per_split' was not found in serialized data! Struct: " + toString()); +- } +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, describe_splits_ex_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.cfName != null) { +- oprot.writeFieldBegin(CF_NAME_FIELD_DESC); +- oprot.writeString(struct.cfName); +- oprot.writeFieldEnd(); +- } +- if (struct.start_token != null) { +- oprot.writeFieldBegin(START_TOKEN_FIELD_DESC); +- oprot.writeString(struct.start_token); +- oprot.writeFieldEnd(); +- } +- if (struct.end_token != null) { +- oprot.writeFieldBegin(END_TOKEN_FIELD_DESC); +- oprot.writeString(struct.end_token); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldBegin(KEYS_PER_SPLIT_FIELD_DESC); +- oprot.writeI32(struct.keys_per_split); +- oprot.writeFieldEnd(); +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class describe_splits_ex_argsTupleSchemeFactory implements SchemeFactory { +- public describe_splits_ex_argsTupleScheme getScheme() { +- return new describe_splits_ex_argsTupleScheme(); +- } +- } +- +- private static class describe_splits_ex_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, describe_splits_ex_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeString(struct.cfName); +- oprot.writeString(struct.start_token); +- 
oprot.writeString(struct.end_token); +- oprot.writeI32(struct.keys_per_split); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, describe_splits_ex_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.cfName = iprot.readString(); +- struct.setCfNameIsSet(true); +- struct.start_token = iprot.readString(); +- struct.setStart_tokenIsSet(true); +- struct.end_token = iprot.readString(); +- struct.setEnd_tokenIsSet(true); +- struct.keys_per_split = iprot.readI32(); +- struct.setKeys_per_splitIsSet(true); +- } +- } +- +- } +- +- public static class describe_splits_ex_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("describe_splits_ex_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new describe_splits_ex_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new describe_splits_ex_resultTupleSchemeFactory()); +- } +- +- public List success; // required +- public InvalidRequestException ire; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CfSplit.class)))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(describe_splits_ex_result.class, metaDataMap); +- } +- +- public describe_splits_ex_result() { +- } +- +- public describe_splits_ex_result( +- List success, +- InvalidRequestException ire) +- { +- this(); +- this.success = success; +- this.ire = ire; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public describe_splits_ex_result(describe_splits_ex_result other) { +- if (other.isSetSuccess()) { +- List __this__success = new ArrayList(other.success.size()); +- for (CfSplit other_element : other.success) { +- __this__success.add(new CfSplit(other_element)); +- } +- this.success = __this__success; +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- } +- +- public describe_splits_ex_result deepCopy() { +- return new describe_splits_ex_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- } +- +- public int getSuccessSize() { +- return (this.success == null) ? 0 : this.success.size(); +- } +- +- public java.util.Iterator getSuccessIterator() { +- return (this.success == null) ? null : this.success.iterator(); +- } +- +- public void addToSuccess(CfSplit elem) { +- if (this.success == null) { +- this.success = new ArrayList(); +- } +- this.success.add(elem); +- } +- +- public List getSuccess() { +- return this.success; +- } +- +- public describe_splits_ex_result setSuccess(List success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public describe_splits_ex_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- 
public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((List)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof describe_splits_ex_result) +- return this.equals((describe_splits_ex_result)that); +- return false; +- } +- +- public boolean equals(describe_splits_ex_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && 
(isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(describe_splits_ex_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("describe_splits_ex_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } 
else { +- sb.append(this.ire); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class describe_splits_ex_resultStandardSchemeFactory implements SchemeFactory { +- public describe_splits_ex_resultStandardScheme getScheme() { +- return new describe_splits_ex_resultStandardScheme(); +- } +- } +- +- private static class describe_splits_ex_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, describe_splits_ex_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list440 = iprot.readListBegin(); +- struct.success = new ArrayList(_list440.size); +- for (int _i441 = 0; _i441 < _list440.size; ++_i441) +- { +- CfSplit _elem442; +- _elem442 = new CfSplit(); +- _elem442.read(iprot); +- struct.success.add(_elem442); +- } +- 
iprot.readListEnd(); +- } +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, describe_splits_ex_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); +- for (CfSplit _iter443 : struct.success) +- { +- _iter443.write(oprot); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class describe_splits_ex_resultTupleSchemeFactory implements SchemeFactory { +- public describe_splits_ex_resultTupleScheme getScheme() { +- return new describe_splits_ex_resultTupleScheme(); +- } +- } +- +- private static class describe_splits_ex_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, describe_splits_ex_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- 
BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetIre()) { +- optionals.set(1); +- } +- oprot.writeBitSet(optionals, 2); +- if (struct.isSetSuccess()) { +- { +- oprot.writeI32(struct.success.size()); +- for (CfSplit _iter444 : struct.success) +- { +- _iter444.write(oprot); +- } +- } +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, describe_splits_ex_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(2); +- if (incoming.get(0)) { +- { +- org.apache.thrift.protocol.TList _list445 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); +- struct.success = new ArrayList(_list445.size); +- for (int _i446 = 0; _i446 < _list445.size; ++_i446) +- { +- CfSplit _elem447; +- _elem447 = new CfSplit(); +- _elem447.read(iprot); +- struct.success.add(_elem447); +- } +- } +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- } +- } +- +- } +- +- public static class system_add_column_family_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("system_add_column_family_args"); +- +- private static final org.apache.thrift.protocol.TField CF_DEF_FIELD_DESC = new org.apache.thrift.protocol.TField("cf_def", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new system_add_column_family_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new system_add_column_family_argsTupleSchemeFactory()); 
+- } +- +- public CfDef cf_def; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- CF_DEF((short)1, "cf_def"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // CF_DEF +- return CF_DEF; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.CF_DEF, new org.apache.thrift.meta_data.FieldMetaData("cf_def", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CfDef.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(system_add_column_family_args.class, metaDataMap); +- } +- +- public system_add_column_family_args() { +- } +- +- public system_add_column_family_args( +- CfDef cf_def) +- { +- this(); +- this.cf_def = cf_def; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public system_add_column_family_args(system_add_column_family_args other) { +- if (other.isSetCf_def()) { +- this.cf_def = new CfDef(other.cf_def); +- } +- } +- +- public system_add_column_family_args deepCopy() { +- return new system_add_column_family_args(this); +- } +- +- @Override +- public void clear() { +- this.cf_def = null; +- } +- +- public CfDef getCf_def() { +- return this.cf_def; +- } +- +- public system_add_column_family_args setCf_def(CfDef cf_def) { +- this.cf_def = cf_def; +- return this; +- } +- +- public void unsetCf_def() { +- this.cf_def = null; +- } +- +- /** Returns true if field cf_def is set (has been assigned a value) and false otherwise */ +- public boolean isSetCf_def() { +- return this.cf_def != null; +- } +- +- public void setCf_defIsSet(boolean value) { +- if (!value) { +- this.cf_def = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case CF_DEF: +- if (value == null) { +- unsetCf_def(); +- } else { +- setCf_def((CfDef)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case CF_DEF: +- return getCf_def(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case CF_DEF: +- return isSetCf_def(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof system_add_column_family_args) +- return this.equals((system_add_column_family_args)that); +- return false; +- } +- +- public boolean equals(system_add_column_family_args that) { +- if (that == null) +- return false; +- +- boolean this_present_cf_def = true && this.isSetCf_def(); +- boolean that_present_cf_def = true && 
that.isSetCf_def(); +- if (this_present_cf_def || that_present_cf_def) { +- if (!(this_present_cf_def && that_present_cf_def)) +- return false; +- if (!this.cf_def.equals(that.cf_def)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_cf_def = true && (isSetCf_def()); +- builder.append(present_cf_def); +- if (present_cf_def) +- builder.append(cf_def); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(system_add_column_family_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetCf_def()).compareTo(other.isSetCf_def()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetCf_def()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cf_def, other.cf_def); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("system_add_column_family_args("); +- boolean first = true; +- +- sb.append("cf_def:"); +- if (this.cf_def == null) { +- sb.append("null"); +- } else { +- sb.append(this.cf_def); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (cf_def == null) { +- throw 
new org.apache.thrift.protocol.TProtocolException("Required field 'cf_def' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- if (cf_def != null) { +- cf_def.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class system_add_column_family_argsStandardSchemeFactory implements SchemeFactory { +- public system_add_column_family_argsStandardScheme getScheme() { +- return new system_add_column_family_argsStandardScheme(); +- } +- } +- +- private static class system_add_column_family_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, system_add_column_family_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // CF_DEF +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.cf_def = new CfDef(); +- struct.cf_def.read(iprot); +- struct.setCf_defIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- 
} +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, system_add_column_family_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.cf_def != null) { +- oprot.writeFieldBegin(CF_DEF_FIELD_DESC); +- struct.cf_def.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class system_add_column_family_argsTupleSchemeFactory implements SchemeFactory { +- public system_add_column_family_argsTupleScheme getScheme() { +- return new system_add_column_family_argsTupleScheme(); +- } +- } +- +- private static class system_add_column_family_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, system_add_column_family_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- struct.cf_def.write(oprot); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, system_add_column_family_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.cf_def = new CfDef(); +- struct.cf_def.read(iprot); +- struct.setCf_defIsSet(true); +- } +- } +- +- } +- +- public static class system_add_column_family_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("system_add_column_family_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = 
new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField SDE_FIELD_DESC = new org.apache.thrift.protocol.TField("sde", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new system_add_column_family_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new system_add_column_family_resultTupleSchemeFactory()); +- } +- +- public String success; // required +- public InvalidRequestException ire; // required +- public SchemaDisagreementException sde; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"), +- SDE((short)2, "sde"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- case 2: // SDE +- return SDE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.SDE, new org.apache.thrift.meta_data.FieldMetaData("sde", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(system_add_column_family_result.class, metaDataMap); +- } +- +- public system_add_column_family_result() { +- } +- +- public system_add_column_family_result( +- String success, +- InvalidRequestException ire, +- SchemaDisagreementException sde) +- { +- this(); +- this.success = success; +- this.ire = ire; +- this.sde = sde; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public system_add_column_family_result(system_add_column_family_result other) { +- if (other.isSetSuccess()) { +- this.success = other.success; +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- if (other.isSetSde()) { +- this.sde = new SchemaDisagreementException(other.sde); +- } +- } +- +- public system_add_column_family_result deepCopy() { +- return new system_add_column_family_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- this.sde = null; +- } +- +- public String getSuccess() { +- return this.success; +- } +- +- public system_add_column_family_result setSuccess(String success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public system_add_column_family_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public SchemaDisagreementException getSde() { +- return this.sde; +- } +- +- public system_add_column_family_result setSde(SchemaDisagreementException sde) { +- this.sde = sde; +- return this; +- } +- +- public void unsetSde() { +- this.sde = null; +- } +- +- /** Returns true if field sde is set (has been assigned a value) and false otherwise */ +- public boolean isSetSde() { +- return this.sde != 
null; +- } +- +- public void setSdeIsSet(boolean value) { +- if (!value) { +- this.sde = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((String)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- case SDE: +- if (value == null) { +- unsetSde(); +- } else { +- setSde((SchemaDisagreementException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- case SDE: +- return getSde(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- case SDE: +- return isSetSde(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof system_add_column_family_result) +- return this.equals((system_add_column_family_result)that); +- return false; +- } +- +- public boolean equals(system_add_column_family_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if 
(this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- boolean this_present_sde = true && this.isSetSde(); +- boolean that_present_sde = true && that.isSetSde(); +- if (this_present_sde || that_present_sde) { +- if (!(this_present_sde && that_present_sde)) +- return false; +- if (!this.sde.equals(that.sde)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- boolean present_sde = true && (isSetSde()); +- builder.append(present_sde); +- if (present_sde) +- builder.append(sde); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(system_add_column_family_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetSde()).compareTo(other.isSetSde()); +- if (lastComparison != 0) { +- return 
lastComparison; +- } +- if (isSetSde()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sde, other.sde); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("system_add_column_family_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("sde:"); +- if (this.sde == null) { +- sb.append("null"); +- } else { +- sb.append(this.sde); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new 
org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class system_add_column_family_resultStandardSchemeFactory implements SchemeFactory { +- public system_add_column_family_resultStandardScheme getScheme() { +- return new system_add_column_family_resultStandardScheme(); +- } +- } +- +- private static class system_add_column_family_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, system_add_column_family_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.success = iprot.readString(); +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // SDE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.sde = new SchemaDisagreementException(); +- struct.sde.read(iprot); +- struct.setSdeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method 
+- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, system_add_column_family_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- oprot.writeString(struct.success); +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.sde != null) { +- oprot.writeFieldBegin(SDE_FIELD_DESC); +- struct.sde.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class system_add_column_family_resultTupleSchemeFactory implements SchemeFactory { +- public system_add_column_family_resultTupleScheme getScheme() { +- return new system_add_column_family_resultTupleScheme(); +- } +- } +- +- private static class system_add_column_family_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, system_add_column_family_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetIre()) { +- optionals.set(1); +- } +- if (struct.isSetSde()) { +- optionals.set(2); +- } +- oprot.writeBitSet(optionals, 3); +- if (struct.isSetSuccess()) { +- oprot.writeString(struct.success); +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- if (struct.isSetSde()) { +- struct.sde.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, system_add_column_family_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(3); +- if (incoming.get(0)) { +- struct.success = 
iprot.readString(); +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- if (incoming.get(2)) { +- struct.sde = new SchemaDisagreementException(); +- struct.sde.read(iprot); +- struct.setSdeIsSet(true); +- } +- } +- } +- +- } +- +- public static class system_drop_column_family_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("system_drop_column_family_args"); +- +- private static final org.apache.thrift.protocol.TField COLUMN_FAMILY_FIELD_DESC = new org.apache.thrift.protocol.TField("column_family", org.apache.thrift.protocol.TType.STRING, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new system_drop_column_family_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new system_drop_column_family_argsTupleSchemeFactory()); +- } +- +- public String column_family; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- COLUMN_FAMILY((short)1, "column_family"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // COLUMN_FAMILY +- return COLUMN_FAMILY; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. 
+- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.COLUMN_FAMILY, new org.apache.thrift.meta_data.FieldMetaData("column_family", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(system_drop_column_family_args.class, metaDataMap); +- } +- +- public system_drop_column_family_args() { +- } +- +- public system_drop_column_family_args( +- String column_family) +- { +- this(); +- this.column_family = column_family; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public system_drop_column_family_args(system_drop_column_family_args other) { +- if (other.isSetColumn_family()) { +- this.column_family = other.column_family; +- } +- } +- +- public system_drop_column_family_args deepCopy() { +- return new system_drop_column_family_args(this); +- } +- +- @Override +- public void clear() { +- this.column_family = null; +- } +- +- public String getColumn_family() { +- return this.column_family; +- } +- +- public system_drop_column_family_args setColumn_family(String column_family) { +- this.column_family = column_family; +- return this; +- } +- +- public void unsetColumn_family() { +- this.column_family = null; +- } +- +- /** Returns true if field column_family is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumn_family() { +- return this.column_family != null; +- } +- +- public void setColumn_familyIsSet(boolean value) { +- if (!value) { +- this.column_family = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case COLUMN_FAMILY: +- if (value == null) { +- unsetColumn_family(); +- } else { +- setColumn_family((String)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case COLUMN_FAMILY: +- return getColumn_family(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case COLUMN_FAMILY: +- return isSetColumn_family(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof system_drop_column_family_args) +- return this.equals((system_drop_column_family_args)that); +- return false; +- } +- +- public boolean 
equals(system_drop_column_family_args that) { +- if (that == null) +- return false; +- +- boolean this_present_column_family = true && this.isSetColumn_family(); +- boolean that_present_column_family = true && that.isSetColumn_family(); +- if (this_present_column_family || that_present_column_family) { +- if (!(this_present_column_family && that_present_column_family)) +- return false; +- if (!this.column_family.equals(that.column_family)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_column_family = true && (isSetColumn_family()); +- builder.append(present_column_family); +- if (present_column_family) +- builder.append(column_family); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(system_drop_column_family_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetColumn_family()).compareTo(other.isSetColumn_family()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumn_family()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_family, other.column_family); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("system_drop_column_family_args("); +- boolean first = 
true; +- +- sb.append("column_family:"); +- if (this.column_family == null) { +- sb.append("null"); +- } else { +- sb.append(this.column_family); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (column_family == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'column_family' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class system_drop_column_family_argsStandardSchemeFactory implements SchemeFactory { +- public system_drop_column_family_argsStandardScheme getScheme() { +- return new system_drop_column_family_argsStandardScheme(); +- } +- } +- +- private static class system_drop_column_family_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, system_drop_column_family_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // COLUMN_FAMILY +- if (schemeField.type == 
org.apache.thrift.protocol.TType.STRING) { +- struct.column_family = iprot.readString(); +- struct.setColumn_familyIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, system_drop_column_family_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.column_family != null) { +- oprot.writeFieldBegin(COLUMN_FAMILY_FIELD_DESC); +- oprot.writeString(struct.column_family); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class system_drop_column_family_argsTupleSchemeFactory implements SchemeFactory { +- public system_drop_column_family_argsTupleScheme getScheme() { +- return new system_drop_column_family_argsTupleScheme(); +- } +- } +- +- private static class system_drop_column_family_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, system_drop_column_family_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeString(struct.column_family); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, system_drop_column_family_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.column_family = iprot.readString(); +- struct.setColumn_familyIsSet(true); +- } +- } +- +- } +- +- public static class system_drop_column_family_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static 
final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("system_drop_column_family_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField SDE_FIELD_DESC = new org.apache.thrift.protocol.TField("sde", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new system_drop_column_family_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new system_drop_column_family_resultTupleSchemeFactory()); +- } +- +- public String success; // required +- public InvalidRequestException ire; // required +- public SchemaDisagreementException sde; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"), +- SDE((short)2, "sde"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- case 2: // SDE +- return SDE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. 
+- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.SDE, new org.apache.thrift.meta_data.FieldMetaData("sde", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(system_drop_column_family_result.class, metaDataMap); +- } +- +- public system_drop_column_family_result() { +- } +- +- public system_drop_column_family_result( +- String success, +- 
InvalidRequestException ire, +- SchemaDisagreementException sde) +- { +- this(); +- this.success = success; +- this.ire = ire; +- this.sde = sde; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public system_drop_column_family_result(system_drop_column_family_result other) { +- if (other.isSetSuccess()) { +- this.success = other.success; +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- if (other.isSetSde()) { +- this.sde = new SchemaDisagreementException(other.sde); +- } +- } +- +- public system_drop_column_family_result deepCopy() { +- return new system_drop_column_family_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- this.sde = null; +- } +- +- public String getSuccess() { +- return this.success; +- } +- +- public system_drop_column_family_result setSuccess(String success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public system_drop_column_family_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public SchemaDisagreementException getSde() { +- return this.sde; +- } +- +- public system_drop_column_family_result setSde(SchemaDisagreementException sde) { +- this.sde = sde; +- return this; +- } 
+- +- public void unsetSde() { +- this.sde = null; +- } +- +- /** Returns true if field sde is set (has been assigned a value) and false otherwise */ +- public boolean isSetSde() { +- return this.sde != null; +- } +- +- public void setSdeIsSet(boolean value) { +- if (!value) { +- this.sde = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((String)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- case SDE: +- if (value == null) { +- unsetSde(); +- } else { +- setSde((SchemaDisagreementException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- case SDE: +- return getSde(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- case SDE: +- return isSetSde(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof system_drop_column_family_result) +- return this.equals((system_drop_column_family_result)that); +- return false; +- } +- +- public boolean equals(system_drop_column_family_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && 
that_present_success)) +- return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- boolean this_present_sde = true && this.isSetSde(); +- boolean that_present_sde = true && that.isSetSde(); +- if (this_present_sde || that_present_sde) { +- if (!(this_present_sde && that_present_sde)) +- return false; +- if (!this.sde.equals(that.sde)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- boolean present_sde = true && (isSetSde()); +- builder.append(present_sde); +- if (present_sde) +- builder.append(sde); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(system_drop_column_family_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetSde()).compareTo(other.isSetSde()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSde()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sde, other.sde); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("system_drop_column_family_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("sde:"); +- if (this.sde == null) { +- sb.append("null"); +- } else { +- sb.append(this.sde); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new 
java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class system_drop_column_family_resultStandardSchemeFactory implements SchemeFactory { +- public system_drop_column_family_resultStandardScheme getScheme() { +- return new system_drop_column_family_resultStandardScheme(); +- } +- } +- +- private static class system_drop_column_family_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, system_drop_column_family_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.success = iprot.readString(); +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // SDE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.sde = new SchemaDisagreementException(); +- struct.sde.read(iprot); +- struct.setSdeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, system_drop_column_family_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- oprot.writeString(struct.success); +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.sde != null) { +- oprot.writeFieldBegin(SDE_FIELD_DESC); +- struct.sde.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class system_drop_column_family_resultTupleSchemeFactory implements SchemeFactory { +- public system_drop_column_family_resultTupleScheme getScheme() { +- return new system_drop_column_family_resultTupleScheme(); +- } +- } +- +- private static class system_drop_column_family_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, system_drop_column_family_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetIre()) { +- optionals.set(1); +- } +- if (struct.isSetSde()) { +- optionals.set(2); +- } +- oprot.writeBitSet(optionals, 3); +- if (struct.isSetSuccess()) { +- oprot.writeString(struct.success); +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- if (struct.isSetSde()) { +- struct.sde.write(oprot); +- } +- } +- +- @Override +- public void 
read(org.apache.thrift.protocol.TProtocol prot, system_drop_column_family_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(3); +- if (incoming.get(0)) { +- struct.success = iprot.readString(); +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- if (incoming.get(2)) { +- struct.sde = new SchemaDisagreementException(); +- struct.sde.read(iprot); +- struct.setSdeIsSet(true); +- } +- } +- } +- +- } +- +- public static class system_add_keyspace_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("system_add_keyspace_args"); +- +- private static final org.apache.thrift.protocol.TField KS_DEF_FIELD_DESC = new org.apache.thrift.protocol.TField("ks_def", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new system_add_keyspace_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new system_add_keyspace_argsTupleSchemeFactory()); +- } +- +- public KsDef ks_def; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- KS_DEF((short)1, "ks_def"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. 
+- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // KS_DEF +- return KS_DEF; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.KS_DEF, new org.apache.thrift.meta_data.FieldMetaData("ks_def", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, KsDef.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(system_add_keyspace_args.class, metaDataMap); +- } +- +- public system_add_keyspace_args() { +- } +- +- public system_add_keyspace_args( +- KsDef ks_def) +- { +- this(); +- this.ks_def = ks_def; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public system_add_keyspace_args(system_add_keyspace_args other) { +- if (other.isSetKs_def()) { +- this.ks_def = new KsDef(other.ks_def); +- } +- } +- +- public system_add_keyspace_args deepCopy() { +- return new system_add_keyspace_args(this); +- } +- +- @Override +- public void clear() { +- this.ks_def = null; +- } +- +- public KsDef getKs_def() { +- return this.ks_def; +- } +- +- public system_add_keyspace_args setKs_def(KsDef ks_def) { +- this.ks_def = ks_def; +- return this; +- } +- +- public void unsetKs_def() { +- this.ks_def = null; +- } +- +- /** Returns true if field ks_def is set (has been assigned a value) and false otherwise */ +- public boolean isSetKs_def() { +- return this.ks_def != null; +- } +- +- public void setKs_defIsSet(boolean value) { +- if (!value) { +- this.ks_def = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case KS_DEF: +- if (value == null) { +- unsetKs_def(); +- } else { +- setKs_def((KsDef)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case KS_DEF: +- return getKs_def(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case KS_DEF: +- return isSetKs_def(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof system_add_keyspace_args) +- return this.equals((system_add_keyspace_args)that); +- return false; +- } +- +- public boolean equals(system_add_keyspace_args that) { +- if (that == null) +- return false; +- +- boolean this_present_ks_def = true && this.isSetKs_def(); +- boolean that_present_ks_def = true && that.isSetKs_def(); +- if 
(this_present_ks_def || that_present_ks_def) { +- if (!(this_present_ks_def && that_present_ks_def)) +- return false; +- if (!this.ks_def.equals(that.ks_def)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_ks_def = true && (isSetKs_def()); +- builder.append(present_ks_def); +- if (present_ks_def) +- builder.append(ks_def); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(system_add_keyspace_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetKs_def()).compareTo(other.isSetKs_def()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKs_def()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ks_def, other.ks_def); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("system_add_keyspace_args("); +- boolean first = true; +- +- sb.append("ks_def:"); +- if (this.ks_def == null) { +- sb.append("null"); +- } else { +- sb.append(this.ks_def); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (ks_def == null) { +- throw new 
org.apache.thrift.protocol.TProtocolException("Required field 'ks_def' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- if (ks_def != null) { +- ks_def.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class system_add_keyspace_argsStandardSchemeFactory implements SchemeFactory { +- public system_add_keyspace_argsStandardScheme getScheme() { +- return new system_add_keyspace_argsStandardScheme(); +- } +- } +- +- private static class system_add_keyspace_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, system_add_keyspace_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // KS_DEF +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ks_def = new KsDef(); +- struct.ks_def.read(iprot); +- struct.setKs_defIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); 
+- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, system_add_keyspace_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.ks_def != null) { +- oprot.writeFieldBegin(KS_DEF_FIELD_DESC); +- struct.ks_def.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class system_add_keyspace_argsTupleSchemeFactory implements SchemeFactory { +- public system_add_keyspace_argsTupleScheme getScheme() { +- return new system_add_keyspace_argsTupleScheme(); +- } +- } +- +- private static class system_add_keyspace_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, system_add_keyspace_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- struct.ks_def.write(oprot); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, system_add_keyspace_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.ks_def = new KsDef(); +- struct.ks_def.read(iprot); +- struct.setKs_defIsSet(true); +- } +- } +- +- } +- +- public static class system_add_keyspace_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("system_add_keyspace_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", 
org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField SDE_FIELD_DESC = new org.apache.thrift.protocol.TField("sde", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new system_add_keyspace_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new system_add_keyspace_resultTupleSchemeFactory()); +- } +- +- public String success; // required +- public InvalidRequestException ire; // required +- public SchemaDisagreementException sde; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"), +- SDE((short)2, "sde"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- case 2: // SDE +- return SDE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.SDE, new org.apache.thrift.meta_data.FieldMetaData("sde", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(system_add_keyspace_result.class, metaDataMap); +- } +- +- public system_add_keyspace_result() { +- } +- +- public system_add_keyspace_result( +- String success, +- InvalidRequestException ire, +- SchemaDisagreementException sde) +- { +- this(); +- this.success = success; +- this.ire = ire; +- this.sde = sde; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public system_add_keyspace_result(system_add_keyspace_result other) { +- if (other.isSetSuccess()) { +- this.success = other.success; +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- if (other.isSetSde()) { +- this.sde = new SchemaDisagreementException(other.sde); +- } +- } +- +- public system_add_keyspace_result deepCopy() { +- return new system_add_keyspace_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- this.sde = null; +- } +- +- public String getSuccess() { +- return this.success; +- } +- +- public system_add_keyspace_result setSuccess(String success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public system_add_keyspace_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public SchemaDisagreementException getSde() { +- return this.sde; +- } +- +- public system_add_keyspace_result setSde(SchemaDisagreementException sde) { +- this.sde = sde; +- return this; +- } +- +- public void unsetSde() { +- this.sde = null; +- } +- +- /** Returns true if field sde is set (has been assigned a value) and false otherwise */ +- public boolean isSetSde() { +- return this.sde != null; +- } +- +- public void 
setSdeIsSet(boolean value) { +- if (!value) { +- this.sde = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((String)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- case SDE: +- if (value == null) { +- unsetSde(); +- } else { +- setSde((SchemaDisagreementException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- case SDE: +- return getSde(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- case SDE: +- return isSetSde(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof system_add_keyspace_result) +- return this.equals((system_add_keyspace_result)that); +- return false; +- } +- +- public boolean equals(system_add_keyspace_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- 
if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- boolean this_present_sde = true && this.isSetSde(); +- boolean that_present_sde = true && that.isSetSde(); +- if (this_present_sde || that_present_sde) { +- if (!(this_present_sde && that_present_sde)) +- return false; +- if (!this.sde.equals(that.sde)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- boolean present_sde = true && (isSetSde()); +- builder.append(present_sde); +- if (present_sde) +- builder.append(sde); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(system_add_keyspace_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetSde()).compareTo(other.isSetSde()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSde()) { +- lastComparison 
= org.apache.thrift.TBaseHelper.compareTo(this.sde, other.sde); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("system_add_keyspace_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("sde:"); +- if (this.sde == null) { +- sb.append("null"); +- } else { +- sb.append(this.sde); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw 
new java.io.IOException(te); +- } +- } +- +- private static class system_add_keyspace_resultStandardSchemeFactory implements SchemeFactory { +- public system_add_keyspace_resultStandardScheme getScheme() { +- return new system_add_keyspace_resultStandardScheme(); +- } +- } +- +- private static class system_add_keyspace_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, system_add_keyspace_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.success = iprot.readString(); +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // SDE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.sde = new SchemaDisagreementException(); +- struct.sde.read(iprot); +- struct.setSdeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, system_add_keyspace_result struct) 
throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- oprot.writeString(struct.success); +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.sde != null) { +- oprot.writeFieldBegin(SDE_FIELD_DESC); +- struct.sde.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class system_add_keyspace_resultTupleSchemeFactory implements SchemeFactory { +- public system_add_keyspace_resultTupleScheme getScheme() { +- return new system_add_keyspace_resultTupleScheme(); +- } +- } +- +- private static class system_add_keyspace_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, system_add_keyspace_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetIre()) { +- optionals.set(1); +- } +- if (struct.isSetSde()) { +- optionals.set(2); +- } +- oprot.writeBitSet(optionals, 3); +- if (struct.isSetSuccess()) { +- oprot.writeString(struct.success); +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- if (struct.isSetSde()) { +- struct.sde.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, system_add_keyspace_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(3); +- if (incoming.get(0)) { +- struct.success = iprot.readString(); +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- 
struct.setIreIsSet(true); +- } +- if (incoming.get(2)) { +- struct.sde = new SchemaDisagreementException(); +- struct.sde.read(iprot); +- struct.setSdeIsSet(true); +- } +- } +- } +- +- } +- +- public static class system_drop_keyspace_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("system_drop_keyspace_args"); +- +- private static final org.apache.thrift.protocol.TField KEYSPACE_FIELD_DESC = new org.apache.thrift.protocol.TField("keyspace", org.apache.thrift.protocol.TType.STRING, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new system_drop_keyspace_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new system_drop_keyspace_argsTupleSchemeFactory()); +- } +- +- public String keyspace; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- KEYSPACE((short)1, "keyspace"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // KEYSPACE +- return KEYSPACE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. 
+- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.KEYSPACE, new org.apache.thrift.meta_data.FieldMetaData("keyspace", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(system_drop_keyspace_args.class, metaDataMap); +- } +- +- public system_drop_keyspace_args() { +- } +- +- public system_drop_keyspace_args( +- String keyspace) +- { +- this(); +- this.keyspace = keyspace; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public system_drop_keyspace_args(system_drop_keyspace_args other) { +- if (other.isSetKeyspace()) { +- this.keyspace = other.keyspace; +- } +- } +- +- public system_drop_keyspace_args deepCopy() { +- return new system_drop_keyspace_args(this); +- } +- +- @Override +- public void clear() { +- this.keyspace = null; +- } +- +- public String getKeyspace() { +- return this.keyspace; +- } +- +- public system_drop_keyspace_args setKeyspace(String keyspace) { +- this.keyspace = keyspace; +- return this; +- } +- +- public void unsetKeyspace() { +- this.keyspace = null; +- } +- +- /** Returns true if field keyspace is set (has been assigned a value) and false otherwise */ +- public boolean isSetKeyspace() { +- return this.keyspace != null; +- } +- +- public void setKeyspaceIsSet(boolean value) { +- if (!value) { +- this.keyspace = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case KEYSPACE: +- if (value == null) { +- unsetKeyspace(); +- } else { +- setKeyspace((String)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case KEYSPACE: +- return getKeyspace(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case KEYSPACE: +- return isSetKeyspace(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof system_drop_keyspace_args) +- return this.equals((system_drop_keyspace_args)that); +- return false; +- } +- +- public boolean equals(system_drop_keyspace_args that) { +- if (that == null) +- return false; +- +- boolean this_present_keyspace = true && this.isSetKeyspace(); +- boolean 
that_present_keyspace = true && that.isSetKeyspace(); +- if (this_present_keyspace || that_present_keyspace) { +- if (!(this_present_keyspace && that_present_keyspace)) +- return false; +- if (!this.keyspace.equals(that.keyspace)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_keyspace = true && (isSetKeyspace()); +- builder.append(present_keyspace); +- if (present_keyspace) +- builder.append(keyspace); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(system_drop_keyspace_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetKeyspace()).compareTo(other.isSetKeyspace()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKeyspace()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.keyspace, other.keyspace); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("system_drop_keyspace_args("); +- boolean first = true; +- +- sb.append("keyspace:"); +- if (this.keyspace == null) { +- sb.append("null"); +- } else { +- sb.append(this.keyspace); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- 
// check for required fields +- if (keyspace == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'keyspace' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class system_drop_keyspace_argsStandardSchemeFactory implements SchemeFactory { +- public system_drop_keyspace_argsStandardScheme getScheme() { +- return new system_drop_keyspace_argsStandardScheme(); +- } +- } +- +- private static class system_drop_keyspace_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, system_drop_keyspace_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // KEYSPACE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.keyspace = iprot.readString(); +- struct.setKeyspaceIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- 
iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, system_drop_keyspace_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.keyspace != null) { +- oprot.writeFieldBegin(KEYSPACE_FIELD_DESC); +- oprot.writeString(struct.keyspace); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class system_drop_keyspace_argsTupleSchemeFactory implements SchemeFactory { +- public system_drop_keyspace_argsTupleScheme getScheme() { +- return new system_drop_keyspace_argsTupleScheme(); +- } +- } +- +- private static class system_drop_keyspace_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, system_drop_keyspace_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeString(struct.keyspace); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, system_drop_keyspace_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.keyspace = iprot.readString(); +- struct.setKeyspaceIsSet(true); +- } +- } +- +- } +- +- public static class system_drop_keyspace_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("system_drop_keyspace_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new 
org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField SDE_FIELD_DESC = new org.apache.thrift.protocol.TField("sde", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new system_drop_keyspace_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new system_drop_keyspace_resultTupleSchemeFactory()); +- } +- +- public String success; // required +- public InvalidRequestException ire; // required +- public SchemaDisagreementException sde; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"), +- SDE((short)2, "sde"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- case 2: // SDE +- return SDE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.SDE, new org.apache.thrift.meta_data.FieldMetaData("sde", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(system_drop_keyspace_result.class, metaDataMap); +- } +- +- public system_drop_keyspace_result() { +- } +- +- public system_drop_keyspace_result( +- String success, +- InvalidRequestException ire, +- SchemaDisagreementException sde) +- { +- this(); +- this.success = success; +- this.ire = ire; +- this.sde = sde; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public system_drop_keyspace_result(system_drop_keyspace_result other) { +- if (other.isSetSuccess()) { +- this.success = other.success; +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- if (other.isSetSde()) { +- this.sde = new SchemaDisagreementException(other.sde); +- } +- } +- +- public system_drop_keyspace_result deepCopy() { +- return new system_drop_keyspace_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- this.sde = null; +- } +- +- public String getSuccess() { +- return this.success; +- } +- +- public system_drop_keyspace_result setSuccess(String success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public system_drop_keyspace_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public SchemaDisagreementException getSde() { +- return this.sde; +- } +- +- public system_drop_keyspace_result setSde(SchemaDisagreementException sde) { +- this.sde = sde; +- return this; +- } +- +- public void unsetSde() { +- this.sde = null; +- } +- +- /** Returns true if field sde is set (has been assigned a value) and false otherwise */ +- public boolean isSetSde() { +- return this.sde != null; +- } +- +- public void 
setSdeIsSet(boolean value) { +- if (!value) { +- this.sde = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((String)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- case SDE: +- if (value == null) { +- unsetSde(); +- } else { +- setSde((SchemaDisagreementException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- case SDE: +- return getSde(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- case SDE: +- return isSetSde(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof system_drop_keyspace_result) +- return this.equals((system_drop_keyspace_result)that); +- return false; +- } +- +- public boolean equals(system_drop_keyspace_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { 
+- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- boolean this_present_sde = true && this.isSetSde(); +- boolean that_present_sde = true && that.isSetSde(); +- if (this_present_sde || that_present_sde) { +- if (!(this_present_sde && that_present_sde)) +- return false; +- if (!this.sde.equals(that.sde)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- boolean present_sde = true && (isSetSde()); +- builder.append(present_sde); +- if (present_sde) +- builder.append(sde); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(system_drop_keyspace_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetSde()).compareTo(other.isSetSde()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSde()) { +- 
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sde, other.sde); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("system_drop_keyspace_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("sde:"); +- if (this.sde == null) { +- sb.append("null"); +- } else { +- sb.append(this.sde); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch 
(org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class system_drop_keyspace_resultStandardSchemeFactory implements SchemeFactory { +- public system_drop_keyspace_resultStandardScheme getScheme() { +- return new system_drop_keyspace_resultStandardScheme(); +- } +- } +- +- private static class system_drop_keyspace_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, system_drop_keyspace_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.success = iprot.readString(); +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // SDE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.sde = new SchemaDisagreementException(); +- struct.sde.read(iprot); +- struct.setSdeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void 
write(org.apache.thrift.protocol.TProtocol oprot, system_drop_keyspace_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- oprot.writeString(struct.success); +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.sde != null) { +- oprot.writeFieldBegin(SDE_FIELD_DESC); +- struct.sde.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class system_drop_keyspace_resultTupleSchemeFactory implements SchemeFactory { +- public system_drop_keyspace_resultTupleScheme getScheme() { +- return new system_drop_keyspace_resultTupleScheme(); +- } +- } +- +- private static class system_drop_keyspace_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, system_drop_keyspace_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetIre()) { +- optionals.set(1); +- } +- if (struct.isSetSde()) { +- optionals.set(2); +- } +- oprot.writeBitSet(optionals, 3); +- if (struct.isSetSuccess()) { +- oprot.writeString(struct.success); +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- if (struct.isSetSde()) { +- struct.sde.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, system_drop_keyspace_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(3); +- if (incoming.get(0)) { +- struct.success = iprot.readString(); +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { 
+- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- if (incoming.get(2)) { +- struct.sde = new SchemaDisagreementException(); +- struct.sde.read(iprot); +- struct.setSdeIsSet(true); +- } +- } +- } +- +- } +- +- public static class system_update_keyspace_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("system_update_keyspace_args"); +- +- private static final org.apache.thrift.protocol.TField KS_DEF_FIELD_DESC = new org.apache.thrift.protocol.TField("ks_def", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new system_update_keyspace_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new system_update_keyspace_argsTupleSchemeFactory()); +- } +- +- public KsDef ks_def; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- KS_DEF((short)1, "ks_def"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // KS_DEF +- return KS_DEF; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. 
+- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.KS_DEF, new org.apache.thrift.meta_data.FieldMetaData("ks_def", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, KsDef.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(system_update_keyspace_args.class, metaDataMap); +- } +- +- public system_update_keyspace_args() { +- } +- +- public system_update_keyspace_args( +- KsDef ks_def) +- { +- this(); +- this.ks_def = ks_def; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public system_update_keyspace_args(system_update_keyspace_args other) { +- if (other.isSetKs_def()) { +- this.ks_def = new KsDef(other.ks_def); +- } +- } +- +- public system_update_keyspace_args deepCopy() { +- return new system_update_keyspace_args(this); +- } +- +- @Override +- public void clear() { +- this.ks_def = null; +- } +- +- public KsDef getKs_def() { +- return this.ks_def; +- } +- +- public system_update_keyspace_args setKs_def(KsDef ks_def) { +- this.ks_def = ks_def; +- return this; +- } +- +- public void unsetKs_def() { +- this.ks_def = null; +- } +- +- /** Returns true if field ks_def is set (has been assigned a value) and false otherwise */ +- public boolean isSetKs_def() { +- return this.ks_def != null; +- } +- +- public void setKs_defIsSet(boolean value) { +- if (!value) { +- this.ks_def = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case KS_DEF: +- if (value == null) { +- unsetKs_def(); +- } else { +- setKs_def((KsDef)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case KS_DEF: +- return getKs_def(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case KS_DEF: +- return isSetKs_def(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof system_update_keyspace_args) +- return this.equals((system_update_keyspace_args)that); +- return false; +- } +- +- public boolean equals(system_update_keyspace_args that) { +- if (that == null) +- return false; +- +- boolean this_present_ks_def = true && this.isSetKs_def(); +- boolean that_present_ks_def = true && 
that.isSetKs_def(); +- if (this_present_ks_def || that_present_ks_def) { +- if (!(this_present_ks_def && that_present_ks_def)) +- return false; +- if (!this.ks_def.equals(that.ks_def)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_ks_def = true && (isSetKs_def()); +- builder.append(present_ks_def); +- if (present_ks_def) +- builder.append(ks_def); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(system_update_keyspace_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetKs_def()).compareTo(other.isSetKs_def()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKs_def()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ks_def, other.ks_def); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("system_update_keyspace_args("); +- boolean first = true; +- +- sb.append("ks_def:"); +- if (this.ks_def == null) { +- sb.append("null"); +- } else { +- sb.append(this.ks_def); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (ks_def == null) { +- throw new 
org.apache.thrift.protocol.TProtocolException("Required field 'ks_def' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- if (ks_def != null) { +- ks_def.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class system_update_keyspace_argsStandardSchemeFactory implements SchemeFactory { +- public system_update_keyspace_argsStandardScheme getScheme() { +- return new system_update_keyspace_argsStandardScheme(); +- } +- } +- +- private static class system_update_keyspace_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, system_update_keyspace_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // KS_DEF +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ks_def = new KsDef(); +- struct.ks_def.read(iprot); +- struct.setKs_defIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- 
iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, system_update_keyspace_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.ks_def != null) { +- oprot.writeFieldBegin(KS_DEF_FIELD_DESC); +- struct.ks_def.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class system_update_keyspace_argsTupleSchemeFactory implements SchemeFactory { +- public system_update_keyspace_argsTupleScheme getScheme() { +- return new system_update_keyspace_argsTupleScheme(); +- } +- } +- +- private static class system_update_keyspace_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, system_update_keyspace_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- struct.ks_def.write(oprot); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, system_update_keyspace_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.ks_def = new KsDef(); +- struct.ks_def.read(iprot); +- struct.setKs_defIsSet(true); +- } +- } +- +- } +- +- public static class system_update_keyspace_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("system_update_keyspace_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new 
org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField SDE_FIELD_DESC = new org.apache.thrift.protocol.TField("sde", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new system_update_keyspace_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new system_update_keyspace_resultTupleSchemeFactory()); +- } +- +- public String success; // required +- public InvalidRequestException ire; // required +- public SchemaDisagreementException sde; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"), +- SDE((short)2, "sde"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- case 2: // SDE +- return SDE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.SDE, new org.apache.thrift.meta_data.FieldMetaData("sde", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(system_update_keyspace_result.class, metaDataMap); +- } +- +- public system_update_keyspace_result() { +- } +- +- public system_update_keyspace_result( +- String success, +- InvalidRequestException ire, +- SchemaDisagreementException sde) +- { +- this(); +- this.success = success; +- this.ire = ire; +- this.sde = sde; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public system_update_keyspace_result(system_update_keyspace_result other) { +- if (other.isSetSuccess()) { +- this.success = other.success; +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- if (other.isSetSde()) { +- this.sde = new SchemaDisagreementException(other.sde); +- } +- } +- +- public system_update_keyspace_result deepCopy() { +- return new system_update_keyspace_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- this.sde = null; +- } +- +- public String getSuccess() { +- return this.success; +- } +- +- public system_update_keyspace_result setSuccess(String success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public system_update_keyspace_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public SchemaDisagreementException getSde() { +- return this.sde; +- } +- +- public system_update_keyspace_result setSde(SchemaDisagreementException sde) { +- this.sde = sde; +- return this; +- } +- +- public void unsetSde() { +- this.sde = null; +- } +- +- /** Returns true if field sde is set (has been assigned a value) and false otherwise */ +- public boolean isSetSde() { +- return this.sde != null; +- } +- 
+- public void setSdeIsSet(boolean value) { +- if (!value) { +- this.sde = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((String)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- case SDE: +- if (value == null) { +- unsetSde(); +- } else { +- setSde((SchemaDisagreementException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- case SDE: +- return getSde(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- case SDE: +- return isSetSde(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof system_update_keyspace_result) +- return this.equals((system_update_keyspace_result)that); +- return false; +- } +- +- public boolean equals(system_update_keyspace_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || 
that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- boolean this_present_sde = true && this.isSetSde(); +- boolean that_present_sde = true && that.isSetSde(); +- if (this_present_sde || that_present_sde) { +- if (!(this_present_sde && that_present_sde)) +- return false; +- if (!this.sde.equals(that.sde)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- boolean present_sde = true && (isSetSde()); +- builder.append(present_sde); +- if (present_sde) +- builder.append(sde); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(system_update_keyspace_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetSde()).compareTo(other.isSetSde()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if 
(isSetSde()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sde, other.sde); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("system_update_keyspace_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("sde:"); +- if (this.sde == null) { +- sb.append("null"); +- } else { +- sb.append(this.sde); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch 
(org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class system_update_keyspace_resultStandardSchemeFactory implements SchemeFactory { +- public system_update_keyspace_resultStandardScheme getScheme() { +- return new system_update_keyspace_resultStandardScheme(); +- } +- } +- +- private static class system_update_keyspace_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, system_update_keyspace_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.success = iprot.readString(); +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // SDE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.sde = new SchemaDisagreementException(); +- struct.sde.read(iprot); +- struct.setSdeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void 
write(org.apache.thrift.protocol.TProtocol oprot, system_update_keyspace_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- oprot.writeString(struct.success); +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.sde != null) { +- oprot.writeFieldBegin(SDE_FIELD_DESC); +- struct.sde.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class system_update_keyspace_resultTupleSchemeFactory implements SchemeFactory { +- public system_update_keyspace_resultTupleScheme getScheme() { +- return new system_update_keyspace_resultTupleScheme(); +- } +- } +- +- private static class system_update_keyspace_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, system_update_keyspace_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetIre()) { +- optionals.set(1); +- } +- if (struct.isSetSde()) { +- optionals.set(2); +- } +- oprot.writeBitSet(optionals, 3); +- if (struct.isSetSuccess()) { +- oprot.writeString(struct.success); +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- if (struct.isSetSde()) { +- struct.sde.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, system_update_keyspace_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(3); +- if (incoming.get(0)) { +- struct.success = iprot.readString(); +- struct.setSuccessIsSet(true); +- } +- if 
(incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- if (incoming.get(2)) { +- struct.sde = new SchemaDisagreementException(); +- struct.sde.read(iprot); +- struct.setSdeIsSet(true); +- } +- } +- } +- +- } +- +- public static class system_update_column_family_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("system_update_column_family_args"); +- +- private static final org.apache.thrift.protocol.TField CF_DEF_FIELD_DESC = new org.apache.thrift.protocol.TField("cf_def", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new system_update_column_family_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new system_update_column_family_argsTupleSchemeFactory()); +- } +- +- public CfDef cf_def; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- CF_DEF((short)1, "cf_def"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // CF_DEF +- return CF_DEF; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. 
+- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.CF_DEF, new org.apache.thrift.meta_data.FieldMetaData("cf_def", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CfDef.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(system_update_column_family_args.class, metaDataMap); +- } +- +- public system_update_column_family_args() { +- } +- +- public system_update_column_family_args( +- CfDef cf_def) +- { +- this(); +- this.cf_def = cf_def; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public system_update_column_family_args(system_update_column_family_args other) { +- if (other.isSetCf_def()) { +- this.cf_def = new CfDef(other.cf_def); +- } +- } +- +- public system_update_column_family_args deepCopy() { +- return new system_update_column_family_args(this); +- } +- +- @Override +- public void clear() { +- this.cf_def = null; +- } +- +- public CfDef getCf_def() { +- return this.cf_def; +- } +- +- public system_update_column_family_args setCf_def(CfDef cf_def) { +- this.cf_def = cf_def; +- return this; +- } +- +- public void unsetCf_def() { +- this.cf_def = null; +- } +- +- /** Returns true if field cf_def is set (has been assigned a value) and false otherwise */ +- public boolean isSetCf_def() { +- return this.cf_def != null; +- } +- +- public void setCf_defIsSet(boolean value) { +- if (!value) { +- this.cf_def = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case CF_DEF: +- if (value == null) { +- unsetCf_def(); +- } else { +- setCf_def((CfDef)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case CF_DEF: +- return getCf_def(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case CF_DEF: +- return isSetCf_def(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof system_update_column_family_args) +- return this.equals((system_update_column_family_args)that); +- return false; +- } +- +- public boolean equals(system_update_column_family_args that) { +- if (that == null) +- return false; +- +- boolean this_present_cf_def = true && this.isSetCf_def(); +- boolean 
that_present_cf_def = true && that.isSetCf_def(); +- if (this_present_cf_def || that_present_cf_def) { +- if (!(this_present_cf_def && that_present_cf_def)) +- return false; +- if (!this.cf_def.equals(that.cf_def)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_cf_def = true && (isSetCf_def()); +- builder.append(present_cf_def); +- if (present_cf_def) +- builder.append(cf_def); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(system_update_column_family_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetCf_def()).compareTo(other.isSetCf_def()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetCf_def()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cf_def, other.cf_def); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("system_update_column_family_args("); +- boolean first = true; +- +- sb.append("cf_def:"); +- if (this.cf_def == null) { +- sb.append("null"); +- } else { +- sb.append(this.cf_def); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields 
+- if (cf_def == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'cf_def' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- if (cf_def != null) { +- cf_def.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class system_update_column_family_argsStandardSchemeFactory implements SchemeFactory { +- public system_update_column_family_argsStandardScheme getScheme() { +- return new system_update_column_family_argsStandardScheme(); +- } +- } +- +- private static class system_update_column_family_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, system_update_column_family_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // CF_DEF +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.cf_def = new CfDef(); +- struct.cf_def.read(iprot); +- struct.setCf_defIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, system_update_column_family_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.cf_def != null) { +- oprot.writeFieldBegin(CF_DEF_FIELD_DESC); +- struct.cf_def.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class system_update_column_family_argsTupleSchemeFactory implements SchemeFactory { +- public system_update_column_family_argsTupleScheme getScheme() { +- return new system_update_column_family_argsTupleScheme(); +- } +- } +- +- private static class system_update_column_family_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, system_update_column_family_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- struct.cf_def.write(oprot); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, system_update_column_family_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.cf_def = new CfDef(); +- struct.cf_def.read(iprot); +- struct.setCf_defIsSet(true); +- } +- } +- +- } +- +- public static class system_update_column_family_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("system_update_column_family_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, 
(short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField SDE_FIELD_DESC = new org.apache.thrift.protocol.TField("sde", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new system_update_column_family_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new system_update_column_family_resultTupleSchemeFactory()); +- } +- +- public String success; // required +- public InvalidRequestException ire; // required +- public SchemaDisagreementException sde; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"), +- SDE((short)2, "sde"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- case 2: // SDE +- return SDE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.SDE, new org.apache.thrift.meta_data.FieldMetaData("sde", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(system_update_column_family_result.class, metaDataMap); +- } +- +- public system_update_column_family_result() { +- } +- +- public system_update_column_family_result( +- String success, +- InvalidRequestException ire, +- SchemaDisagreementException sde) +- { +- this(); +- this.success = success; +- this.ire = ire; +- this.sde = sde; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public system_update_column_family_result(system_update_column_family_result other) { +- if (other.isSetSuccess()) { +- this.success = other.success; +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- if (other.isSetSde()) { +- this.sde = new SchemaDisagreementException(other.sde); +- } +- } +- +- public system_update_column_family_result deepCopy() { +- return new system_update_column_family_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- this.sde = null; +- } +- +- public String getSuccess() { +- return this.success; +- } +- +- public system_update_column_family_result setSuccess(String success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public system_update_column_family_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public SchemaDisagreementException getSde() { +- return this.sde; +- } +- +- public system_update_column_family_result setSde(SchemaDisagreementException sde) { +- this.sde = sde; +- return this; +- } +- +- public void unsetSde() { +- this.sde = null; +- } +- +- /** Returns true if field sde is set (has been assigned a value) and false otherwise */ +- public boolean isSetSde() { +- 
return this.sde != null; +- } +- +- public void setSdeIsSet(boolean value) { +- if (!value) { +- this.sde = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((String)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- case SDE: +- if (value == null) { +- unsetSde(); +- } else { +- setSde((SchemaDisagreementException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- case SDE: +- return getSde(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- case SDE: +- return isSetSde(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof system_update_column_family_result) +- return this.equals((system_update_column_family_result)that); +- return false; +- } +- +- public boolean equals(system_update_column_family_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true 
&& that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- boolean this_present_sde = true && this.isSetSde(); +- boolean that_present_sde = true && that.isSetSde(); +- if (this_present_sde || that_present_sde) { +- if (!(this_present_sde && that_present_sde)) +- return false; +- if (!this.sde.equals(that.sde)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- boolean present_sde = true && (isSetSde()); +- builder.append(present_sde); +- if (present_sde) +- builder.append(sde); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(system_update_column_family_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetSde()).compareTo(other.isSetSde()); +- if (lastComparison 
!= 0) { +- return lastComparison; +- } +- if (isSetSde()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sde, other.sde); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("system_update_column_family_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("sde:"); +- if (this.sde == null) { +- sb.append("null"); +- } else { +- sb.append(this.sde); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new 
org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class system_update_column_family_resultStandardSchemeFactory implements SchemeFactory { +- public system_update_column_family_resultStandardScheme getScheme() { +- return new system_update_column_family_resultStandardScheme(); +- } +- } +- +- private static class system_update_column_family_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, system_update_column_family_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.success = iprot.readString(); +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // SDE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.sde = new SchemaDisagreementException(); +- struct.sde.read(iprot); +- struct.setSdeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the 
validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, system_update_column_family_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- oprot.writeString(struct.success); +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.sde != null) { +- oprot.writeFieldBegin(SDE_FIELD_DESC); +- struct.sde.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class system_update_column_family_resultTupleSchemeFactory implements SchemeFactory { +- public system_update_column_family_resultTupleScheme getScheme() { +- return new system_update_column_family_resultTupleScheme(); +- } +- } +- +- private static class system_update_column_family_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, system_update_column_family_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetIre()) { +- optionals.set(1); +- } +- if (struct.isSetSde()) { +- optionals.set(2); +- } +- oprot.writeBitSet(optionals, 3); +- if (struct.isSetSuccess()) { +- oprot.writeString(struct.success); +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- if (struct.isSetSde()) { +- struct.sde.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, system_update_column_family_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(3); +- if 
(incoming.get(0)) { +- struct.success = iprot.readString(); +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- if (incoming.get(2)) { +- struct.sde = new SchemaDisagreementException(); +- struct.sde.read(iprot); +- struct.setSdeIsSet(true); +- } +- } +- } +- +- } +- +- public static class execute_cql_query_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("execute_cql_query_args"); +- +- private static final org.apache.thrift.protocol.TField QUERY_FIELD_DESC = new org.apache.thrift.protocol.TField("query", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField COMPRESSION_FIELD_DESC = new org.apache.thrift.protocol.TField("compression", org.apache.thrift.protocol.TType.I32, (short)2); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new execute_cql_query_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new execute_cql_query_argsTupleSchemeFactory()); +- } +- +- public ByteBuffer query; // required +- /** +- * +- * @see Compression +- */ +- public Compression compression; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- QUERY((short)1, "query"), +- /** +- * +- * @see Compression +- */ +- COMPRESSION((short)2, "compression"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. 
+- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // QUERY +- return QUERY; +- case 2: // COMPRESSION +- return COMPRESSION; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.QUERY, new org.apache.thrift.meta_data.FieldMetaData("query", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.COMPRESSION, new org.apache.thrift.meta_data.FieldMetaData("compression", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, Compression.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(execute_cql_query_args.class, metaDataMap); +- } +- +- public 
execute_cql_query_args() { +- } +- +- public execute_cql_query_args( +- ByteBuffer query, +- Compression compression) +- { +- this(); +- this.query = query; +- this.compression = compression; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public execute_cql_query_args(execute_cql_query_args other) { +- if (other.isSetQuery()) { +- this.query = org.apache.thrift.TBaseHelper.copyBinary(other.query); +-; +- } +- if (other.isSetCompression()) { +- this.compression = other.compression; +- } +- } +- +- public execute_cql_query_args deepCopy() { +- return new execute_cql_query_args(this); +- } +- +- @Override +- public void clear() { +- this.query = null; +- this.compression = null; +- } +- +- public byte[] getQuery() { +- setQuery(org.apache.thrift.TBaseHelper.rightSize(query)); +- return query == null ? null : query.array(); +- } +- +- public ByteBuffer bufferForQuery() { +- return query; +- } +- +- public execute_cql_query_args setQuery(byte[] query) { +- setQuery(query == null ? 
(ByteBuffer)null : ByteBuffer.wrap(query)); +- return this; +- } +- +- public execute_cql_query_args setQuery(ByteBuffer query) { +- this.query = query; +- return this; +- } +- +- public void unsetQuery() { +- this.query = null; +- } +- +- /** Returns true if field query is set (has been assigned a value) and false otherwise */ +- public boolean isSetQuery() { +- return this.query != null; +- } +- +- public void setQueryIsSet(boolean value) { +- if (!value) { +- this.query = null; +- } +- } +- +- /** +- * +- * @see Compression +- */ +- public Compression getCompression() { +- return this.compression; +- } +- +- /** +- * +- * @see Compression +- */ +- public execute_cql_query_args setCompression(Compression compression) { +- this.compression = compression; +- return this; +- } +- +- public void unsetCompression() { +- this.compression = null; +- } +- +- /** Returns true if field compression is set (has been assigned a value) and false otherwise */ +- public boolean isSetCompression() { +- return this.compression != null; +- } +- +- public void setCompressionIsSet(boolean value) { +- if (!value) { +- this.compression = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case QUERY: +- if (value == null) { +- unsetQuery(); +- } else { +- setQuery((ByteBuffer)value); +- } +- break; +- +- case COMPRESSION: +- if (value == null) { +- unsetCompression(); +- } else { +- setCompression((Compression)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case QUERY: +- return getQuery(); +- +- case COMPRESSION: +- return getCompression(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case QUERY: +- return isSetQuery(); 
+- case COMPRESSION: +- return isSetCompression(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof execute_cql_query_args) +- return this.equals((execute_cql_query_args)that); +- return false; +- } +- +- public boolean equals(execute_cql_query_args that) { +- if (that == null) +- return false; +- +- boolean this_present_query = true && this.isSetQuery(); +- boolean that_present_query = true && that.isSetQuery(); +- if (this_present_query || that_present_query) { +- if (!(this_present_query && that_present_query)) +- return false; +- if (!this.query.equals(that.query)) +- return false; +- } +- +- boolean this_present_compression = true && this.isSetCompression(); +- boolean that_present_compression = true && that.isSetCompression(); +- if (this_present_compression || that_present_compression) { +- if (!(this_present_compression && that_present_compression)) +- return false; +- if (!this.compression.equals(that.compression)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_query = true && (isSetQuery()); +- builder.append(present_query); +- if (present_query) +- builder.append(query); +- +- boolean present_compression = true && (isSetCompression()); +- builder.append(present_compression); +- if (present_compression) +- builder.append(compression.getValue()); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(execute_cql_query_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetQuery()).compareTo(other.isSetQuery()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetQuery()) { +- lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.query, other.query); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetCompression()).compareTo(other.isSetCompression()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetCompression()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.compression, other.compression); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("execute_cql_query_args("); +- boolean first = true; +- +- sb.append("query:"); +- if (this.query == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.query, sb); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("compression:"); +- if (this.compression == null) { +- sb.append("null"); +- } else { +- sb.append(this.compression); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (query == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'query' was not present! Struct: " + toString()); +- } +- if (compression == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'compression' was not present! 
Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class execute_cql_query_argsStandardSchemeFactory implements SchemeFactory { +- public execute_cql_query_argsStandardScheme getScheme() { +- return new execute_cql_query_argsStandardScheme(); +- } +- } +- +- private static class execute_cql_query_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, execute_cql_query_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // QUERY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.query = iprot.readBinary(); +- struct.setQueryIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // COMPRESSION +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.compression = Compression.findByValue(iprot.readI32()); +- struct.setCompressionIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, execute_cql_query_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.query != null) { +- oprot.writeFieldBegin(QUERY_FIELD_DESC); +- oprot.writeBinary(struct.query); +- oprot.writeFieldEnd(); +- } +- if (struct.compression != null) { +- oprot.writeFieldBegin(COMPRESSION_FIELD_DESC); +- oprot.writeI32(struct.compression.getValue()); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class execute_cql_query_argsTupleSchemeFactory implements SchemeFactory { +- public execute_cql_query_argsTupleScheme getScheme() { +- return new execute_cql_query_argsTupleScheme(); +- } +- } +- +- private static class execute_cql_query_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, execute_cql_query_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeBinary(struct.query); +- oprot.writeI32(struct.compression.getValue()); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, execute_cql_query_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.query = iprot.readBinary(); +- struct.setQueryIsSet(true); +- struct.compression = Compression.findByValue(iprot.readI32()); +- struct.setCompressionIsSet(true); +- } +- } +- +- } +- +- public static class execute_cql_query_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final 
org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("execute_cql_query_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField UE_FIELD_DESC = new org.apache.thrift.protocol.TField("ue", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField TE_FIELD_DESC = new org.apache.thrift.protocol.TField("te", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- private static final org.apache.thrift.protocol.TField SDE_FIELD_DESC = new org.apache.thrift.protocol.TField("sde", org.apache.thrift.protocol.TType.STRUCT, (short)4); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new execute_cql_query_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new execute_cql_query_resultTupleSchemeFactory()); +- } +- +- public CqlResult success; // required +- public InvalidRequestException ire; // required +- public UnavailableException ue; // required +- public TimedOutException te; // required +- public SchemaDisagreementException sde; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"), +- UE((short)2, "ue"), +- TE((short)3, "te"), +- SDE((short)4, "sde"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- case 2: // UE +- return UE; +- case 3: // TE +- return TE; +- case 4: // SDE +- return SDE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CqlResult.class))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.UE, new org.apache.thrift.meta_data.FieldMetaData("ue", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.TE, new org.apache.thrift.meta_data.FieldMetaData("te", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.SDE, new org.apache.thrift.meta_data.FieldMetaData("sde", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(execute_cql_query_result.class, 
metaDataMap); +- } +- +- public execute_cql_query_result() { +- } +- +- public execute_cql_query_result( +- CqlResult success, +- InvalidRequestException ire, +- UnavailableException ue, +- TimedOutException te, +- SchemaDisagreementException sde) +- { +- this(); +- this.success = success; +- this.ire = ire; +- this.ue = ue; +- this.te = te; +- this.sde = sde; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public execute_cql_query_result(execute_cql_query_result other) { +- if (other.isSetSuccess()) { +- this.success = new CqlResult(other.success); +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- if (other.isSetUe()) { +- this.ue = new UnavailableException(other.ue); +- } +- if (other.isSetTe()) { +- this.te = new TimedOutException(other.te); +- } +- if (other.isSetSde()) { +- this.sde = new SchemaDisagreementException(other.sde); +- } +- } +- +- public execute_cql_query_result deepCopy() { +- return new execute_cql_query_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- this.ue = null; +- this.te = null; +- this.sde = null; +- } +- +- public CqlResult getSuccess() { +- return this.success; +- } +- +- public execute_cql_query_result setSuccess(CqlResult success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public execute_cql_query_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) 
and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public UnavailableException getUe() { +- return this.ue; +- } +- +- public execute_cql_query_result setUe(UnavailableException ue) { +- this.ue = ue; +- return this; +- } +- +- public void unsetUe() { +- this.ue = null; +- } +- +- /** Returns true if field ue is set (has been assigned a value) and false otherwise */ +- public boolean isSetUe() { +- return this.ue != null; +- } +- +- public void setUeIsSet(boolean value) { +- if (!value) { +- this.ue = null; +- } +- } +- +- public TimedOutException getTe() { +- return this.te; +- } +- +- public execute_cql_query_result setTe(TimedOutException te) { +- this.te = te; +- return this; +- } +- +- public void unsetTe() { +- this.te = null; +- } +- +- /** Returns true if field te is set (has been assigned a value) and false otherwise */ +- public boolean isSetTe() { +- return this.te != null; +- } +- +- public void setTeIsSet(boolean value) { +- if (!value) { +- this.te = null; +- } +- } +- +- public SchemaDisagreementException getSde() { +- return this.sde; +- } +- +- public execute_cql_query_result setSde(SchemaDisagreementException sde) { +- this.sde = sde; +- return this; +- } +- +- public void unsetSde() { +- this.sde = null; +- } +- +- /** Returns true if field sde is set (has been assigned a value) and false otherwise */ +- public boolean isSetSde() { +- return this.sde != null; +- } +- +- public void setSdeIsSet(boolean value) { +- if (!value) { +- this.sde = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((CqlResult)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- case UE: +- if (value == 
null) { +- unsetUe(); +- } else { +- setUe((UnavailableException)value); +- } +- break; +- +- case TE: +- if (value == null) { +- unsetTe(); +- } else { +- setTe((TimedOutException)value); +- } +- break; +- +- case SDE: +- if (value == null) { +- unsetSde(); +- } else { +- setSde((SchemaDisagreementException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- case UE: +- return getUe(); +- +- case TE: +- return getTe(); +- +- case SDE: +- return getSde(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- case UE: +- return isSetUe(); +- case TE: +- return isSetTe(); +- case SDE: +- return isSetSde(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof execute_cql_query_result) +- return this.equals((execute_cql_query_result)that); +- return false; +- } +- +- public boolean equals(execute_cql_query_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if 
(!this.ire.equals(that.ire)) +- return false; +- } +- +- boolean this_present_ue = true && this.isSetUe(); +- boolean that_present_ue = true && that.isSetUe(); +- if (this_present_ue || that_present_ue) { +- if (!(this_present_ue && that_present_ue)) +- return false; +- if (!this.ue.equals(that.ue)) +- return false; +- } +- +- boolean this_present_te = true && this.isSetTe(); +- boolean that_present_te = true && that.isSetTe(); +- if (this_present_te || that_present_te) { +- if (!(this_present_te && that_present_te)) +- return false; +- if (!this.te.equals(that.te)) +- return false; +- } +- +- boolean this_present_sde = true && this.isSetSde(); +- boolean that_present_sde = true && that.isSetSde(); +- if (this_present_sde || that_present_sde) { +- if (!(this_present_sde && that_present_sde)) +- return false; +- if (!this.sde.equals(that.sde)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- boolean present_ue = true && (isSetUe()); +- builder.append(present_ue); +- if (present_ue) +- builder.append(ue); +- +- boolean present_te = true && (isSetTe()); +- builder.append(present_te); +- if (present_te) +- builder.append(te); +- +- boolean present_sde = true && (isSetSde()); +- builder.append(present_sde); +- if (present_sde) +- builder.append(sde); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(execute_cql_query_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if 
(lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetUe()).compareTo(other.isSetUe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetUe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ue, other.ue); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetTe()).compareTo(other.isSetTe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetTe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.te, other.te); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetSde()).compareTo(other.isSetSde()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSde()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sde, other.sde); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new 
StringBuilder("execute_cql_query_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ue:"); +- if (this.ue == null) { +- sb.append("null"); +- } else { +- sb.append(this.ue); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("te:"); +- if (this.te == null) { +- sb.append("null"); +- } else { +- sb.append(this.te); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("sde:"); +- if (this.sde == null) { +- sb.append("null"); +- } else { +- sb.append(this.sde); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- if (success != null) { +- success.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class execute_cql_query_resultStandardSchemeFactory implements SchemeFactory { +- public execute_cql_query_resultStandardScheme getScheme() { +- return new execute_cql_query_resultStandardScheme(); +- } +- } +- +- private static class 
execute_cql_query_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, execute_cql_query_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.success = new CqlResult(); +- struct.success.read(iprot); +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // UE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // TE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // SDE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.sde = new SchemaDisagreementException(); +- struct.sde.read(iprot); +- struct.setSdeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } 
+- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, execute_cql_query_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- struct.success.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ue != null) { +- oprot.writeFieldBegin(UE_FIELD_DESC); +- struct.ue.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.te != null) { +- oprot.writeFieldBegin(TE_FIELD_DESC); +- struct.te.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.sde != null) { +- oprot.writeFieldBegin(SDE_FIELD_DESC); +- struct.sde.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class execute_cql_query_resultTupleSchemeFactory implements SchemeFactory { +- public execute_cql_query_resultTupleScheme getScheme() { +- return new execute_cql_query_resultTupleScheme(); +- } +- } +- +- private static class execute_cql_query_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, execute_cql_query_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetIre()) { +- optionals.set(1); +- } +- if (struct.isSetUe()) { +- optionals.set(2); +- } +- if (struct.isSetTe()) { +- optionals.set(3); +- } +- if (struct.isSetSde()) { +- optionals.set(4); +- } +- oprot.writeBitSet(optionals, 5); +- if (struct.isSetSuccess()) { +- 
struct.success.write(oprot); +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- if (struct.isSetUe()) { +- struct.ue.write(oprot); +- } +- if (struct.isSetTe()) { +- struct.te.write(oprot); +- } +- if (struct.isSetSde()) { +- struct.sde.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, execute_cql_query_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(5); +- if (incoming.get(0)) { +- struct.success = new CqlResult(); +- struct.success.read(iprot); +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- if (incoming.get(2)) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } +- if (incoming.get(3)) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } +- if (incoming.get(4)) { +- struct.sde = new SchemaDisagreementException(); +- struct.sde.read(iprot); +- struct.setSdeIsSet(true); +- } +- } +- } +- +- } +- +- public static class execute_cql3_query_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("execute_cql3_query_args"); +- +- private static final org.apache.thrift.protocol.TField QUERY_FIELD_DESC = new org.apache.thrift.protocol.TField("query", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField COMPRESSION_FIELD_DESC = new org.apache.thrift.protocol.TField("compression", org.apache.thrift.protocol.TType.I32, (short)2); +- private static final org.apache.thrift.protocol.TField CONSISTENCY_FIELD_DESC = new org.apache.thrift.protocol.TField("consistency", 
org.apache.thrift.protocol.TType.I32, (short)3); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new execute_cql3_query_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new execute_cql3_query_argsTupleSchemeFactory()); +- } +- +- public ByteBuffer query; // required +- /** +- * +- * @see Compression +- */ +- public Compression compression; // required +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel consistency; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- QUERY((short)1, "query"), +- /** +- * +- * @see Compression +- */ +- COMPRESSION((short)2, "compression"), +- /** +- * +- * @see ConsistencyLevel +- */ +- CONSISTENCY((short)3, "consistency"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // QUERY +- return QUERY; +- case 2: // COMPRESSION +- return COMPRESSION; +- case 3: // CONSISTENCY +- return CONSISTENCY; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.QUERY, new org.apache.thrift.meta_data.FieldMetaData("query", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.COMPRESSION, new org.apache.thrift.meta_data.FieldMetaData("compression", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, Compression.class))); +- tmpMap.put(_Fields.CONSISTENCY, new org.apache.thrift.meta_data.FieldMetaData("consistency", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ConsistencyLevel.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(execute_cql3_query_args.class, metaDataMap); +- } +- +- public execute_cql3_query_args() { +- } +- +- public execute_cql3_query_args( +- ByteBuffer query, +- Compression compression, +- ConsistencyLevel consistency) +- { +- this(); +- this.query = query; +- this.compression = compression; +- this.consistency = consistency; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public execute_cql3_query_args(execute_cql3_query_args other) { +- if (other.isSetQuery()) { +- this.query = org.apache.thrift.TBaseHelper.copyBinary(other.query); +-; +- } +- if (other.isSetCompression()) { +- this.compression = other.compression; +- } +- if (other.isSetConsistency()) { +- this.consistency = other.consistency; +- } +- } +- +- public execute_cql3_query_args deepCopy() { +- return new execute_cql3_query_args(this); +- } +- +- @Override +- public void clear() { +- this.query = null; +- this.compression = null; +- this.consistency = null; +- } +- +- public byte[] getQuery() { +- setQuery(org.apache.thrift.TBaseHelper.rightSize(query)); +- return query == null ? null : query.array(); +- } +- +- public ByteBuffer bufferForQuery() { +- return query; +- } +- +- public execute_cql3_query_args setQuery(byte[] query) { +- setQuery(query == null ? (ByteBuffer)null : ByteBuffer.wrap(query)); +- return this; +- } +- +- public execute_cql3_query_args setQuery(ByteBuffer query) { +- this.query = query; +- return this; +- } +- +- public void unsetQuery() { +- this.query = null; +- } +- +- /** Returns true if field query is set (has been assigned a value) and false otherwise */ +- public boolean isSetQuery() { +- return this.query != null; +- } +- +- public void setQueryIsSet(boolean value) { +- if (!value) { +- this.query = null; +- } +- } +- +- /** +- * +- * @see Compression +- */ +- public Compression getCompression() { +- return this.compression; +- } +- +- /** +- * +- * @see Compression +- */ +- public execute_cql3_query_args setCompression(Compression compression) { +- this.compression = compression; +- return this; +- } +- +- public void unsetCompression() { +- this.compression = null; +- } +- +- /** Returns true if field compression is set (has been assigned a value) and false otherwise */ +- public boolean isSetCompression() { +- return this.compression != null; +- } +- +- public void setCompressionIsSet(boolean value) { +- if (!value) { +- 
this.compression = null; +- } +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel getConsistency() { +- return this.consistency; +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public execute_cql3_query_args setConsistency(ConsistencyLevel consistency) { +- this.consistency = consistency; +- return this; +- } +- +- public void unsetConsistency() { +- this.consistency = null; +- } +- +- /** Returns true if field consistency is set (has been assigned a value) and false otherwise */ +- public boolean isSetConsistency() { +- return this.consistency != null; +- } +- +- public void setConsistencyIsSet(boolean value) { +- if (!value) { +- this.consistency = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case QUERY: +- if (value == null) { +- unsetQuery(); +- } else { +- setQuery((ByteBuffer)value); +- } +- break; +- +- case COMPRESSION: +- if (value == null) { +- unsetCompression(); +- } else { +- setCompression((Compression)value); +- } +- break; +- +- case CONSISTENCY: +- if (value == null) { +- unsetConsistency(); +- } else { +- setConsistency((ConsistencyLevel)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case QUERY: +- return getQuery(); +- +- case COMPRESSION: +- return getCompression(); +- +- case CONSISTENCY: +- return getConsistency(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case QUERY: +- return isSetQuery(); +- case COMPRESSION: +- return isSetCompression(); +- case CONSISTENCY: +- return isSetConsistency(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- 
if (that instanceof execute_cql3_query_args) +- return this.equals((execute_cql3_query_args)that); +- return false; +- } +- +- public boolean equals(execute_cql3_query_args that) { +- if (that == null) +- return false; +- +- boolean this_present_query = true && this.isSetQuery(); +- boolean that_present_query = true && that.isSetQuery(); +- if (this_present_query || that_present_query) { +- if (!(this_present_query && that_present_query)) +- return false; +- if (!this.query.equals(that.query)) +- return false; +- } +- +- boolean this_present_compression = true && this.isSetCompression(); +- boolean that_present_compression = true && that.isSetCompression(); +- if (this_present_compression || that_present_compression) { +- if (!(this_present_compression && that_present_compression)) +- return false; +- if (!this.compression.equals(that.compression)) +- return false; +- } +- +- boolean this_present_consistency = true && this.isSetConsistency(); +- boolean that_present_consistency = true && that.isSetConsistency(); +- if (this_present_consistency || that_present_consistency) { +- if (!(this_present_consistency && that_present_consistency)) +- return false; +- if (!this.consistency.equals(that.consistency)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_query = true && (isSetQuery()); +- builder.append(present_query); +- if (present_query) +- builder.append(query); +- +- boolean present_compression = true && (isSetCompression()); +- builder.append(present_compression); +- if (present_compression) +- builder.append(compression.getValue()); +- +- boolean present_consistency = true && (isSetConsistency()); +- builder.append(present_consistency); +- if (present_consistency) +- builder.append(consistency.getValue()); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(execute_cql3_query_args other) { +- if 
(!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetQuery()).compareTo(other.isSetQuery()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetQuery()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.query, other.query); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetCompression()).compareTo(other.isSetCompression()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetCompression()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.compression, other.compression); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetConsistency()).compareTo(other.isSetConsistency()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetConsistency()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.consistency, other.consistency); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("execute_cql3_query_args("); +- boolean first = true; +- +- sb.append("query:"); +- if (this.query == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.query, sb); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("compression:"); 
+- if (this.compression == null) { +- sb.append("null"); +- } else { +- sb.append(this.compression); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("consistency:"); +- if (this.consistency == null) { +- sb.append("null"); +- } else { +- sb.append(this.consistency); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (query == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'query' was not present! Struct: " + toString()); +- } +- if (compression == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'compression' was not present! Struct: " + toString()); +- } +- if (consistency == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'consistency' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class execute_cql3_query_argsStandardSchemeFactory implements SchemeFactory { +- public execute_cql3_query_argsStandardScheme getScheme() { +- return new execute_cql3_query_argsStandardScheme(); +- } +- } +- +- private static class execute_cql3_query_argsStandardScheme extends StandardScheme { +- +- public void 
read(org.apache.thrift.protocol.TProtocol iprot, execute_cql3_query_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // QUERY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.query = iprot.readBinary(); +- struct.setQueryIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // COMPRESSION +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.compression = Compression.findByValue(iprot.readI32()); +- struct.setCompressionIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // CONSISTENCY +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.consistency = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistencyIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, execute_cql3_query_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.query != null) { +- oprot.writeFieldBegin(QUERY_FIELD_DESC); +- oprot.writeBinary(struct.query); +- oprot.writeFieldEnd(); +- } +- if (struct.compression != null) { +- oprot.writeFieldBegin(COMPRESSION_FIELD_DESC); +- oprot.writeI32(struct.compression.getValue()); +- 
oprot.writeFieldEnd(); +- } +- if (struct.consistency != null) { +- oprot.writeFieldBegin(CONSISTENCY_FIELD_DESC); +- oprot.writeI32(struct.consistency.getValue()); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class execute_cql3_query_argsTupleSchemeFactory implements SchemeFactory { +- public execute_cql3_query_argsTupleScheme getScheme() { +- return new execute_cql3_query_argsTupleScheme(); +- } +- } +- +- private static class execute_cql3_query_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, execute_cql3_query_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeBinary(struct.query); +- oprot.writeI32(struct.compression.getValue()); +- oprot.writeI32(struct.consistency.getValue()); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, execute_cql3_query_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.query = iprot.readBinary(); +- struct.setQueryIsSet(true); +- struct.compression = Compression.findByValue(iprot.readI32()); +- struct.setCompressionIsSet(true); +- struct.consistency = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistencyIsSet(true); +- } +- } +- +- } +- +- public static class execute_cql3_query_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("execute_cql3_query_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", 
org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField UE_FIELD_DESC = new org.apache.thrift.protocol.TField("ue", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField TE_FIELD_DESC = new org.apache.thrift.protocol.TField("te", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- private static final org.apache.thrift.protocol.TField SDE_FIELD_DESC = new org.apache.thrift.protocol.TField("sde", org.apache.thrift.protocol.TType.STRUCT, (short)4); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new execute_cql3_query_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new execute_cql3_query_resultTupleSchemeFactory()); +- } +- +- public CqlResult success; // required +- public InvalidRequestException ire; // required +- public UnavailableException ue; // required +- public TimedOutException te; // required +- public SchemaDisagreementException sde; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"), +- UE((short)2, "ue"), +- TE((short)3, "te"), +- SDE((short)4, "sde"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. 
+- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- case 2: // UE +- return UE; +- case 3: // TE +- return TE; +- case 4: // SDE +- return SDE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CqlResult.class))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.UE, new org.apache.thrift.meta_data.FieldMetaData("ue", 
org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.TE, new org.apache.thrift.meta_data.FieldMetaData("te", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.SDE, new org.apache.thrift.meta_data.FieldMetaData("sde", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(execute_cql3_query_result.class, metaDataMap); +- } +- +- public execute_cql3_query_result() { +- } +- +- public execute_cql3_query_result( +- CqlResult success, +- InvalidRequestException ire, +- UnavailableException ue, +- TimedOutException te, +- SchemaDisagreementException sde) +- { +- this(); +- this.success = success; +- this.ire = ire; +- this.ue = ue; +- this.te = te; +- this.sde = sde; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public execute_cql3_query_result(execute_cql3_query_result other) { +- if (other.isSetSuccess()) { +- this.success = new CqlResult(other.success); +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- if (other.isSetUe()) { +- this.ue = new UnavailableException(other.ue); +- } +- if (other.isSetTe()) { +- this.te = new TimedOutException(other.te); +- } +- if (other.isSetSde()) { +- this.sde = new SchemaDisagreementException(other.sde); +- } +- } +- +- public execute_cql3_query_result deepCopy() { +- return new execute_cql3_query_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- this.ue = null; +- this.te = null; +- this.sde = null; +- } +- +- public CqlResult getSuccess() { +- return this.success; +- } +- +- public execute_cql3_query_result setSuccess(CqlResult success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public execute_cql3_query_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public UnavailableException getUe() { +- return this.ue; +- } +- +- public execute_cql3_query_result setUe(UnavailableException ue) { +- this.ue = ue; +- return this; +- } +- +- public void unsetUe() { +- this.ue = null; 
+- } +- +- /** Returns true if field ue is set (has been assigned a value) and false otherwise */ +- public boolean isSetUe() { +- return this.ue != null; +- } +- +- public void setUeIsSet(boolean value) { +- if (!value) { +- this.ue = null; +- } +- } +- +- public TimedOutException getTe() { +- return this.te; +- } +- +- public execute_cql3_query_result setTe(TimedOutException te) { +- this.te = te; +- return this; +- } +- +- public void unsetTe() { +- this.te = null; +- } +- +- /** Returns true if field te is set (has been assigned a value) and false otherwise */ +- public boolean isSetTe() { +- return this.te != null; +- } +- +- public void setTeIsSet(boolean value) { +- if (!value) { +- this.te = null; +- } +- } +- +- public SchemaDisagreementException getSde() { +- return this.sde; +- } +- +- public execute_cql3_query_result setSde(SchemaDisagreementException sde) { +- this.sde = sde; +- return this; +- } +- +- public void unsetSde() { +- this.sde = null; +- } +- +- /** Returns true if field sde is set (has been assigned a value) and false otherwise */ +- public boolean isSetSde() { +- return this.sde != null; +- } +- +- public void setSdeIsSet(boolean value) { +- if (!value) { +- this.sde = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((CqlResult)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- case UE: +- if (value == null) { +- unsetUe(); +- } else { +- setUe((UnavailableException)value); +- } +- break; +- +- case TE: +- if (value == null) { +- unsetTe(); +- } else { +- setTe((TimedOutException)value); +- } +- break; +- +- case SDE: +- if (value == null) { +- unsetSde(); +- } else { +- setSde((SchemaDisagreementException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch 
(field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- case UE: +- return getUe(); +- +- case TE: +- return getTe(); +- +- case SDE: +- return getSde(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- case UE: +- return isSetUe(); +- case TE: +- return isSetTe(); +- case SDE: +- return isSetSde(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof execute_cql3_query_result) +- return this.equals((execute_cql3_query_result)that); +- return false; +- } +- +- public boolean equals(execute_cql3_query_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- boolean this_present_ue = true && this.isSetUe(); +- boolean that_present_ue = true && that.isSetUe(); +- if (this_present_ue || that_present_ue) { +- if (!(this_present_ue && that_present_ue)) +- return false; +- if (!this.ue.equals(that.ue)) +- return false; +- } +- +- boolean this_present_te = true && this.isSetTe(); +- boolean that_present_te 
= true && that.isSetTe(); +- if (this_present_te || that_present_te) { +- if (!(this_present_te && that_present_te)) +- return false; +- if (!this.te.equals(that.te)) +- return false; +- } +- +- boolean this_present_sde = true && this.isSetSde(); +- boolean that_present_sde = true && that.isSetSde(); +- if (this_present_sde || that_present_sde) { +- if (!(this_present_sde && that_present_sde)) +- return false; +- if (!this.sde.equals(that.sde)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- boolean present_ue = true && (isSetUe()); +- builder.append(present_ue); +- if (present_ue) +- builder.append(ue); +- +- boolean present_te = true && (isSetTe()); +- builder.append(present_te); +- if (present_te) +- builder.append(te); +- +- boolean present_sde = true && (isSetSde()); +- builder.append(present_sde); +- if (present_sde) +- builder.append(sde); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(execute_cql3_query_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- 
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetUe()).compareTo(other.isSetUe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetUe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ue, other.ue); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetTe()).compareTo(other.isSetTe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetTe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.te, other.te); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetSde()).compareTo(other.isSetSde()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSde()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sde, other.sde); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("execute_cql3_query_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- if (!first) sb.append(", 
"); +- sb.append("ue:"); +- if (this.ue == null) { +- sb.append("null"); +- } else { +- sb.append(this.ue); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("te:"); +- if (this.te == null) { +- sb.append("null"); +- } else { +- sb.append(this.te); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("sde:"); +- if (this.sde == null) { +- sb.append("null"); +- } else { +- sb.append(this.sde); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- if (success != null) { +- success.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class execute_cql3_query_resultStandardSchemeFactory implements SchemeFactory { +- public execute_cql3_query_resultStandardScheme getScheme() { +- return new execute_cql3_query_resultStandardScheme(); +- } +- } +- +- private static class execute_cql3_query_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, execute_cql3_query_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == 
org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.success = new CqlResult(); +- struct.success.read(iprot); +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // UE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // TE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // SDE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.sde = new SchemaDisagreementException(); +- struct.sde.read(iprot); +- struct.setSdeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, execute_cql3_query_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- 
oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- struct.success.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ue != null) { +- oprot.writeFieldBegin(UE_FIELD_DESC); +- struct.ue.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.te != null) { +- oprot.writeFieldBegin(TE_FIELD_DESC); +- struct.te.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.sde != null) { +- oprot.writeFieldBegin(SDE_FIELD_DESC); +- struct.sde.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class execute_cql3_query_resultTupleSchemeFactory implements SchemeFactory { +- public execute_cql3_query_resultTupleScheme getScheme() { +- return new execute_cql3_query_resultTupleScheme(); +- } +- } +- +- private static class execute_cql3_query_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, execute_cql3_query_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetIre()) { +- optionals.set(1); +- } +- if (struct.isSetUe()) { +- optionals.set(2); +- } +- if (struct.isSetTe()) { +- optionals.set(3); +- } +- if (struct.isSetSde()) { +- optionals.set(4); +- } +- oprot.writeBitSet(optionals, 5); +- if (struct.isSetSuccess()) { +- struct.success.write(oprot); +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- if (struct.isSetUe()) { +- struct.ue.write(oprot); +- } +- if (struct.isSetTe()) { +- struct.te.write(oprot); +- } +- if (struct.isSetSde()) { +- struct.sde.write(oprot); +- } +- } +- +- @Override +- public void 
read(org.apache.thrift.protocol.TProtocol prot, execute_cql3_query_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(5); +- if (incoming.get(0)) { +- struct.success = new CqlResult(); +- struct.success.read(iprot); +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- if (incoming.get(2)) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } +- if (incoming.get(3)) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } +- if (incoming.get(4)) { +- struct.sde = new SchemaDisagreementException(); +- struct.sde.read(iprot); +- struct.setSdeIsSet(true); +- } +- } +- } +- +- } +- +- public static class prepare_cql_query_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("prepare_cql_query_args"); +- +- private static final org.apache.thrift.protocol.TField QUERY_FIELD_DESC = new org.apache.thrift.protocol.TField("query", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField COMPRESSION_FIELD_DESC = new org.apache.thrift.protocol.TField("compression", org.apache.thrift.protocol.TType.I32, (short)2); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new prepare_cql_query_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new prepare_cql_query_argsTupleSchemeFactory()); +- } +- +- public ByteBuffer query; // required +- /** +- * +- * @see Compression +- */ +- public Compression compression; // required +- +- /** The set of fields this struct contains, along with convenience 
methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- QUERY((short)1, "query"), +- /** +- * +- * @see Compression +- */ +- COMPRESSION((short)2, "compression"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // QUERY +- return QUERY; +- case 2: // COMPRESSION +- return COMPRESSION; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.QUERY, new org.apache.thrift.meta_data.FieldMetaData("query", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.COMPRESSION, new org.apache.thrift.meta_data.FieldMetaData("compression", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, Compression.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(prepare_cql_query_args.class, metaDataMap); +- } +- +- public prepare_cql_query_args() { +- } +- +- public prepare_cql_query_args( +- ByteBuffer query, +- Compression compression) +- { +- this(); +- this.query = query; +- this.compression = compression; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public prepare_cql_query_args(prepare_cql_query_args other) { +- if (other.isSetQuery()) { +- this.query = org.apache.thrift.TBaseHelper.copyBinary(other.query); +-; +- } +- if (other.isSetCompression()) { +- this.compression = other.compression; +- } +- } +- +- public prepare_cql_query_args deepCopy() { +- return new prepare_cql_query_args(this); +- } +- +- @Override +- public void clear() { +- this.query = null; +- this.compression = null; +- } +- +- public byte[] getQuery() { +- setQuery(org.apache.thrift.TBaseHelper.rightSize(query)); +- return query == null ? null : query.array(); +- } +- +- public ByteBuffer bufferForQuery() { +- return query; +- } +- +- public prepare_cql_query_args setQuery(byte[] query) { +- setQuery(query == null ? (ByteBuffer)null : ByteBuffer.wrap(query)); +- return this; +- } +- +- public prepare_cql_query_args setQuery(ByteBuffer query) { +- this.query = query; +- return this; +- } +- +- public void unsetQuery() { +- this.query = null; +- } +- +- /** Returns true if field query is set (has been assigned a value) and false otherwise */ +- public boolean isSetQuery() { +- return this.query != null; +- } +- +- public void setQueryIsSet(boolean value) { +- if (!value) { +- this.query = null; +- } +- } +- +- /** +- * +- * @see Compression +- */ +- public Compression getCompression() { +- return this.compression; +- } +- +- /** +- * +- * @see Compression +- */ +- public prepare_cql_query_args setCompression(Compression compression) { +- this.compression = compression; +- return this; +- } +- +- public void unsetCompression() { +- this.compression = null; +- } +- +- /** Returns true if field compression is set (has been assigned a value) and false otherwise */ +- public boolean isSetCompression() { +- return this.compression != null; +- } +- +- public void setCompressionIsSet(boolean value) { +- if (!value) { +- this.compression = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- 
case QUERY: +- if (value == null) { +- unsetQuery(); +- } else { +- setQuery((ByteBuffer)value); +- } +- break; +- +- case COMPRESSION: +- if (value == null) { +- unsetCompression(); +- } else { +- setCompression((Compression)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case QUERY: +- return getQuery(); +- +- case COMPRESSION: +- return getCompression(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case QUERY: +- return isSetQuery(); +- case COMPRESSION: +- return isSetCompression(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof prepare_cql_query_args) +- return this.equals((prepare_cql_query_args)that); +- return false; +- } +- +- public boolean equals(prepare_cql_query_args that) { +- if (that == null) +- return false; +- +- boolean this_present_query = true && this.isSetQuery(); +- boolean that_present_query = true && that.isSetQuery(); +- if (this_present_query || that_present_query) { +- if (!(this_present_query && that_present_query)) +- return false; +- if (!this.query.equals(that.query)) +- return false; +- } +- +- boolean this_present_compression = true && this.isSetCompression(); +- boolean that_present_compression = true && that.isSetCompression(); +- if (this_present_compression || that_present_compression) { +- if (!(this_present_compression && that_present_compression)) +- return false; +- if (!this.compression.equals(that.compression)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_query = true && 
(isSetQuery()); +- builder.append(present_query); +- if (present_query) +- builder.append(query); +- +- boolean present_compression = true && (isSetCompression()); +- builder.append(present_compression); +- if (present_compression) +- builder.append(compression.getValue()); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(prepare_cql_query_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetQuery()).compareTo(other.isSetQuery()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetQuery()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.query, other.query); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetCompression()).compareTo(other.isSetCompression()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetCompression()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.compression, other.compression); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("prepare_cql_query_args("); +- boolean first = true; +- +- sb.append("query:"); +- if (this.query == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.query, sb); +- } +- first = false; +- if (!first) 
sb.append(", "); +- sb.append("compression:"); +- if (this.compression == null) { +- sb.append("null"); +- } else { +- sb.append(this.compression); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (query == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'query' was not present! Struct: " + toString()); +- } +- if (compression == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'compression' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class prepare_cql_query_argsStandardSchemeFactory implements SchemeFactory { +- public prepare_cql_query_argsStandardScheme getScheme() { +- return new prepare_cql_query_argsStandardScheme(); +- } +- } +- +- private static class prepare_cql_query_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, prepare_cql_query_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- 
switch (schemeField.id) { +- case 1: // QUERY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.query = iprot.readBinary(); +- struct.setQueryIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // COMPRESSION +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.compression = Compression.findByValue(iprot.readI32()); +- struct.setCompressionIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, prepare_cql_query_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.query != null) { +- oprot.writeFieldBegin(QUERY_FIELD_DESC); +- oprot.writeBinary(struct.query); +- oprot.writeFieldEnd(); +- } +- if (struct.compression != null) { +- oprot.writeFieldBegin(COMPRESSION_FIELD_DESC); +- oprot.writeI32(struct.compression.getValue()); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class prepare_cql_query_argsTupleSchemeFactory implements SchemeFactory { +- public prepare_cql_query_argsTupleScheme getScheme() { +- return new prepare_cql_query_argsTupleScheme(); +- } +- } +- +- private static class prepare_cql_query_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, prepare_cql_query_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeBinary(struct.query); +- 
oprot.writeI32(struct.compression.getValue()); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, prepare_cql_query_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.query = iprot.readBinary(); +- struct.setQueryIsSet(true); +- struct.compression = Compression.findByValue(iprot.readI32()); +- struct.setCompressionIsSet(true); +- } +- } +- +- } +- +- public static class prepare_cql_query_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("prepare_cql_query_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new prepare_cql_query_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new prepare_cql_query_resultTupleSchemeFactory()); +- } +- +- public CqlPreparedResult success; // required +- public InvalidRequestException ire; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. 
+- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CqlPreparedResult.class))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(prepare_cql_query_result.class, metaDataMap); +- } +- +- public 
prepare_cql_query_result() { +- } +- +- public prepare_cql_query_result( +- CqlPreparedResult success, +- InvalidRequestException ire) +- { +- this(); +- this.success = success; +- this.ire = ire; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public prepare_cql_query_result(prepare_cql_query_result other) { +- if (other.isSetSuccess()) { +- this.success = new CqlPreparedResult(other.success); +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- } +- +- public prepare_cql_query_result deepCopy() { +- return new prepare_cql_query_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- } +- +- public CqlPreparedResult getSuccess() { +- return this.success; +- } +- +- public prepare_cql_query_result setSuccess(CqlPreparedResult success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public prepare_cql_query_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((CqlPreparedResult)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); 
+- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof prepare_cql_query_result) +- return this.equals((prepare_cql_query_result)that); +- return false; +- } +- +- public boolean equals(prepare_cql_query_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- return 
builder.toHashCode(); +- } +- +- @Override +- public int compareTo(prepare_cql_query_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("prepare_cql_query_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- if 
(success != null) { +- success.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class prepare_cql_query_resultStandardSchemeFactory implements SchemeFactory { +- public prepare_cql_query_resultStandardScheme getScheme() { +- return new prepare_cql_query_resultStandardScheme(); +- } +- } +- +- private static class prepare_cql_query_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, prepare_cql_query_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.success = new CqlPreparedResult(); +- struct.success.read(iprot); +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- 
break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, prepare_cql_query_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- struct.success.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class prepare_cql_query_resultTupleSchemeFactory implements SchemeFactory { +- public prepare_cql_query_resultTupleScheme getScheme() { +- return new prepare_cql_query_resultTupleScheme(); +- } +- } +- +- private static class prepare_cql_query_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, prepare_cql_query_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetIre()) { +- optionals.set(1); +- } +- oprot.writeBitSet(optionals, 2); +- if (struct.isSetSuccess()) { +- struct.success.write(oprot); +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, prepare_cql_query_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(2); +- if (incoming.get(0)) { +- struct.success = new CqlPreparedResult(); +- struct.success.read(iprot); +- 
struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- } +- } +- +- } +- +- public static class prepare_cql3_query_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("prepare_cql3_query_args"); +- +- private static final org.apache.thrift.protocol.TField QUERY_FIELD_DESC = new org.apache.thrift.protocol.TField("query", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField COMPRESSION_FIELD_DESC = new org.apache.thrift.protocol.TField("compression", org.apache.thrift.protocol.TType.I32, (short)2); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new prepare_cql3_query_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new prepare_cql3_query_argsTupleSchemeFactory()); +- } +- +- public ByteBuffer query; // required +- /** +- * +- * @see Compression +- */ +- public Compression compression; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- QUERY((short)1, "query"), +- /** +- * +- * @see Compression +- */ +- COMPRESSION((short)2, "compression"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. 
+- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // QUERY +- return QUERY; +- case 2: // COMPRESSION +- return COMPRESSION; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.QUERY, new org.apache.thrift.meta_data.FieldMetaData("query", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.COMPRESSION, new org.apache.thrift.meta_data.FieldMetaData("compression", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, Compression.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(prepare_cql3_query_args.class, metaDataMap); +- } +- +- 
public prepare_cql3_query_args() { +- } +- +- public prepare_cql3_query_args( +- ByteBuffer query, +- Compression compression) +- { +- this(); +- this.query = query; +- this.compression = compression; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public prepare_cql3_query_args(prepare_cql3_query_args other) { +- if (other.isSetQuery()) { +- this.query = org.apache.thrift.TBaseHelper.copyBinary(other.query); +-; +- } +- if (other.isSetCompression()) { +- this.compression = other.compression; +- } +- } +- +- public prepare_cql3_query_args deepCopy() { +- return new prepare_cql3_query_args(this); +- } +- +- @Override +- public void clear() { +- this.query = null; +- this.compression = null; +- } +- +- public byte[] getQuery() { +- setQuery(org.apache.thrift.TBaseHelper.rightSize(query)); +- return query == null ? null : query.array(); +- } +- +- public ByteBuffer bufferForQuery() { +- return query; +- } +- +- public prepare_cql3_query_args setQuery(byte[] query) { +- setQuery(query == null ? 
(ByteBuffer)null : ByteBuffer.wrap(query)); +- return this; +- } +- +- public prepare_cql3_query_args setQuery(ByteBuffer query) { +- this.query = query; +- return this; +- } +- +- public void unsetQuery() { +- this.query = null; +- } +- +- /** Returns true if field query is set (has been assigned a value) and false otherwise */ +- public boolean isSetQuery() { +- return this.query != null; +- } +- +- public void setQueryIsSet(boolean value) { +- if (!value) { +- this.query = null; +- } +- } +- +- /** +- * +- * @see Compression +- */ +- public Compression getCompression() { +- return this.compression; +- } +- +- /** +- * +- * @see Compression +- */ +- public prepare_cql3_query_args setCompression(Compression compression) { +- this.compression = compression; +- return this; +- } +- +- public void unsetCompression() { +- this.compression = null; +- } +- +- /** Returns true if field compression is set (has been assigned a value) and false otherwise */ +- public boolean isSetCompression() { +- return this.compression != null; +- } +- +- public void setCompressionIsSet(boolean value) { +- if (!value) { +- this.compression = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case QUERY: +- if (value == null) { +- unsetQuery(); +- } else { +- setQuery((ByteBuffer)value); +- } +- break; +- +- case COMPRESSION: +- if (value == null) { +- unsetCompression(); +- } else { +- setCompression((Compression)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case QUERY: +- return getQuery(); +- +- case COMPRESSION: +- return getCompression(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case QUERY: +- return isSetQuery(); 
+- case COMPRESSION: +- return isSetCompression(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof prepare_cql3_query_args) +- return this.equals((prepare_cql3_query_args)that); +- return false; +- } +- +- public boolean equals(prepare_cql3_query_args that) { +- if (that == null) +- return false; +- +- boolean this_present_query = true && this.isSetQuery(); +- boolean that_present_query = true && that.isSetQuery(); +- if (this_present_query || that_present_query) { +- if (!(this_present_query && that_present_query)) +- return false; +- if (!this.query.equals(that.query)) +- return false; +- } +- +- boolean this_present_compression = true && this.isSetCompression(); +- boolean that_present_compression = true && that.isSetCompression(); +- if (this_present_compression || that_present_compression) { +- if (!(this_present_compression && that_present_compression)) +- return false; +- if (!this.compression.equals(that.compression)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_query = true && (isSetQuery()); +- builder.append(present_query); +- if (present_query) +- builder.append(query); +- +- boolean present_compression = true && (isSetCompression()); +- builder.append(present_compression); +- if (present_compression) +- builder.append(compression.getValue()); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(prepare_cql3_query_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetQuery()).compareTo(other.isSetQuery()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetQuery()) { +- lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.query, other.query); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetCompression()).compareTo(other.isSetCompression()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetCompression()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.compression, other.compression); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("prepare_cql3_query_args("); +- boolean first = true; +- +- sb.append("query:"); +- if (this.query == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.query, sb); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("compression:"); +- if (this.compression == null) { +- sb.append("null"); +- } else { +- sb.append(this.compression); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (query == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'query' was not present! Struct: " + toString()); +- } +- if (compression == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'compression' was not present! 
Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class prepare_cql3_query_argsStandardSchemeFactory implements SchemeFactory { +- public prepare_cql3_query_argsStandardScheme getScheme() { +- return new prepare_cql3_query_argsStandardScheme(); +- } +- } +- +- private static class prepare_cql3_query_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, prepare_cql3_query_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // QUERY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.query = iprot.readBinary(); +- struct.setQueryIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // COMPRESSION +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.compression = Compression.findByValue(iprot.readI32()); +- struct.setCompressionIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, prepare_cql3_query_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.query != null) { +- oprot.writeFieldBegin(QUERY_FIELD_DESC); +- oprot.writeBinary(struct.query); +- oprot.writeFieldEnd(); +- } +- if (struct.compression != null) { +- oprot.writeFieldBegin(COMPRESSION_FIELD_DESC); +- oprot.writeI32(struct.compression.getValue()); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class prepare_cql3_query_argsTupleSchemeFactory implements SchemeFactory { +- public prepare_cql3_query_argsTupleScheme getScheme() { +- return new prepare_cql3_query_argsTupleScheme(); +- } +- } +- +- private static class prepare_cql3_query_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, prepare_cql3_query_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeBinary(struct.query); +- oprot.writeI32(struct.compression.getValue()); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, prepare_cql3_query_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.query = iprot.readBinary(); +- struct.setQueryIsSet(true); +- struct.compression = Compression.findByValue(iprot.readI32()); +- struct.setCompressionIsSet(true); +- } +- } +- +- } +- +- public static class prepare_cql3_query_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final 
org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("prepare_cql3_query_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new prepare_cql3_query_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new prepare_cql3_query_resultTupleSchemeFactory()); +- } +- +- public CqlPreparedResult success; // required +- public InvalidRequestException ire; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CqlPreparedResult.class))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(prepare_cql3_query_result.class, metaDataMap); +- } +- +- public prepare_cql3_query_result() { +- } +- +- public prepare_cql3_query_result( +- CqlPreparedResult success, +- InvalidRequestException ire) +- { +- this(); +- this.success = success; +- this.ire = ire; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public prepare_cql3_query_result(prepare_cql3_query_result other) { +- if (other.isSetSuccess()) { +- this.success = new CqlPreparedResult(other.success); +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- } +- +- public prepare_cql3_query_result deepCopy() { +- return new prepare_cql3_query_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- } +- +- public CqlPreparedResult getSuccess() { +- return this.success; +- } +- +- public prepare_cql3_query_result setSuccess(CqlPreparedResult success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public prepare_cql3_query_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((CqlPreparedResult)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- } +- 
throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof prepare_cql3_query_result) +- return this.equals((prepare_cql3_query_result)that); +- return false; +- } +- +- public boolean equals(prepare_cql3_query_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(prepare_cql3_query_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; 
+- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("prepare_cql3_query_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- if (success != null) { +- success.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new 
org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class prepare_cql3_query_resultStandardSchemeFactory implements SchemeFactory { +- public prepare_cql3_query_resultStandardScheme getScheme() { +- return new prepare_cql3_query_resultStandardScheme(); +- } +- } +- +- private static class prepare_cql3_query_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, prepare_cql3_query_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.success = new CqlPreparedResult(); +- struct.success.read(iprot); +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which 
can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, prepare_cql3_query_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- struct.success.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class prepare_cql3_query_resultTupleSchemeFactory implements SchemeFactory { +- public prepare_cql3_query_resultTupleScheme getScheme() { +- return new prepare_cql3_query_resultTupleScheme(); +- } +- } +- +- private static class prepare_cql3_query_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, prepare_cql3_query_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetIre()) { +- optionals.set(1); +- } +- oprot.writeBitSet(optionals, 2); +- if (struct.isSetSuccess()) { +- struct.success.write(oprot); +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, prepare_cql3_query_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(2); +- if (incoming.get(0)) { +- struct.success = new CqlPreparedResult(); +- struct.success.read(iprot); +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- } +- } +- +- } +- +- public static class 
execute_prepared_cql_query_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("execute_prepared_cql_query_args"); +- +- private static final org.apache.thrift.protocol.TField ITEM_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("itemId", org.apache.thrift.protocol.TType.I32, (short)1); +- private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST, (short)2); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new execute_prepared_cql_query_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new execute_prepared_cql_query_argsTupleSchemeFactory()); +- } +- +- public int itemId; // required +- public List values; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- ITEM_ID((short)1, "itemId"), +- VALUES((short)2, "values"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // ITEM_ID +- return ITEM_ID; +- case 2: // VALUES +- return VALUES; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. 
+- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private static final int __ITEMID_ISSET_ID = 0; +- private byte __isset_bitfield = 0; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.ITEM_ID, new org.apache.thrift.meta_data.FieldMetaData("itemId", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- tmpMap.put(_Fields.VALUES, new org.apache.thrift.meta_data.FieldMetaData("values", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true)))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(execute_prepared_cql_query_args.class, metaDataMap); +- } +- +- public execute_prepared_cql_query_args() { +- } +- +- public execute_prepared_cql_query_args( +- int itemId, +- List values) +- { +- this(); +- this.itemId = 
itemId; +- setItemIdIsSet(true); +- this.values = values; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public execute_prepared_cql_query_args(execute_prepared_cql_query_args other) { +- __isset_bitfield = other.__isset_bitfield; +- this.itemId = other.itemId; +- if (other.isSetValues()) { +- List __this__values = new ArrayList(other.values); +- this.values = __this__values; +- } +- } +- +- public execute_prepared_cql_query_args deepCopy() { +- return new execute_prepared_cql_query_args(this); +- } +- +- @Override +- public void clear() { +- setItemIdIsSet(false); +- this.itemId = 0; +- this.values = null; +- } +- +- public int getItemId() { +- return this.itemId; +- } +- +- public execute_prepared_cql_query_args setItemId(int itemId) { +- this.itemId = itemId; +- setItemIdIsSet(true); +- return this; +- } +- +- public void unsetItemId() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ITEMID_ISSET_ID); +- } +- +- /** Returns true if field itemId is set (has been assigned a value) and false otherwise */ +- public boolean isSetItemId() { +- return EncodingUtils.testBit(__isset_bitfield, __ITEMID_ISSET_ID); +- } +- +- public void setItemIdIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ITEMID_ISSET_ID, value); +- } +- +- public int getValuesSize() { +- return (this.values == null) ? 0 : this.values.size(); +- } +- +- public java.util.Iterator getValuesIterator() { +- return (this.values == null) ? 
null : this.values.iterator(); +- } +- +- public void addToValues(ByteBuffer elem) { +- if (this.values == null) { +- this.values = new ArrayList(); +- } +- this.values.add(elem); +- } +- +- public List getValues() { +- return this.values; +- } +- +- public execute_prepared_cql_query_args setValues(List values) { +- this.values = values; +- return this; +- } +- +- public void unsetValues() { +- this.values = null; +- } +- +- /** Returns true if field values is set (has been assigned a value) and false otherwise */ +- public boolean isSetValues() { +- return this.values != null; +- } +- +- public void setValuesIsSet(boolean value) { +- if (!value) { +- this.values = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case ITEM_ID: +- if (value == null) { +- unsetItemId(); +- } else { +- setItemId((Integer)value); +- } +- break; +- +- case VALUES: +- if (value == null) { +- unsetValues(); +- } else { +- setValues((List)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case ITEM_ID: +- return Integer.valueOf(getItemId()); +- +- case VALUES: +- return getValues(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case ITEM_ID: +- return isSetItemId(); +- case VALUES: +- return isSetValues(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof execute_prepared_cql_query_args) +- return this.equals((execute_prepared_cql_query_args)that); +- return false; +- } +- +- public boolean equals(execute_prepared_cql_query_args that) { +- if (that == null) +- return false; +- +- boolean this_present_itemId = true; 
+- boolean that_present_itemId = true; +- if (this_present_itemId || that_present_itemId) { +- if (!(this_present_itemId && that_present_itemId)) +- return false; +- if (this.itemId != that.itemId) +- return false; +- } +- +- boolean this_present_values = true && this.isSetValues(); +- boolean that_present_values = true && that.isSetValues(); +- if (this_present_values || that_present_values) { +- if (!(this_present_values && that_present_values)) +- return false; +- if (!this.values.equals(that.values)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_itemId = true; +- builder.append(present_itemId); +- if (present_itemId) +- builder.append(itemId); +- +- boolean present_values = true && (isSetValues()); +- builder.append(present_values); +- if (present_values) +- builder.append(values); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(execute_prepared_cql_query_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetItemId()).compareTo(other.isSetItemId()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetItemId()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.itemId, other.itemId); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetValues()).compareTo(other.isSetValues()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetValues()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.values, other.values); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void 
read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("execute_prepared_cql_query_args("); +- boolean first = true; +- +- sb.append("itemId:"); +- sb.append(this.itemId); +- first = false; +- if (!first) sb.append(", "); +- sb.append("values:"); +- if (this.values == null) { +- sb.append("null"); +- } else { +- sb.append(this.values); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // alas, we cannot check 'itemId' because it's a primitive and you chose the non-beans generator. +- if (values == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'values' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+- __isset_bitfield = 0; +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class execute_prepared_cql_query_argsStandardSchemeFactory implements SchemeFactory { +- public execute_prepared_cql_query_argsStandardScheme getScheme() { +- return new execute_prepared_cql_query_argsStandardScheme(); +- } +- } +- +- private static class execute_prepared_cql_query_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, execute_prepared_cql_query_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // ITEM_ID +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.itemId = iprot.readI32(); +- struct.setItemIdIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // VALUES +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list448 = iprot.readListBegin(); +- struct.values = new ArrayList(_list448.size); +- for (int _i449 = 0; _i449 < _list448.size; ++_i449) +- { +- ByteBuffer _elem450; +- _elem450 = iprot.readBinary(); +- struct.values.add(_elem450); +- } +- iprot.readListEnd(); +- } +- struct.setValuesIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in 
the validate method +- if (!struct.isSetItemId()) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'itemId' was not found in serialized data! Struct: " + toString()); +- } +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, execute_prepared_cql_query_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- oprot.writeFieldBegin(ITEM_ID_FIELD_DESC); +- oprot.writeI32(struct.itemId); +- oprot.writeFieldEnd(); +- if (struct.values != null) { +- oprot.writeFieldBegin(VALUES_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.values.size())); +- for (ByteBuffer _iter451 : struct.values) +- { +- oprot.writeBinary(_iter451); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class execute_prepared_cql_query_argsTupleSchemeFactory implements SchemeFactory { +- public execute_prepared_cql_query_argsTupleScheme getScheme() { +- return new execute_prepared_cql_query_argsTupleScheme(); +- } +- } +- +- private static class execute_prepared_cql_query_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, execute_prepared_cql_query_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeI32(struct.itemId); +- { +- oprot.writeI32(struct.values.size()); +- for (ByteBuffer _iter452 : struct.values) +- { +- oprot.writeBinary(_iter452); +- } +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, execute_prepared_cql_query_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.itemId = iprot.readI32(); +- struct.setItemIdIsSet(true); +- { +- org.apache.thrift.protocol.TList 
_list453 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); +- struct.values = new ArrayList(_list453.size); +- for (int _i454 = 0; _i454 < _list453.size; ++_i454) +- { +- ByteBuffer _elem455; +- _elem455 = iprot.readBinary(); +- struct.values.add(_elem455); +- } +- } +- struct.setValuesIsSet(true); +- } +- } +- +- } +- +- public static class execute_prepared_cql_query_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("execute_prepared_cql_query_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField UE_FIELD_DESC = new org.apache.thrift.protocol.TField("ue", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField TE_FIELD_DESC = new org.apache.thrift.protocol.TField("te", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- private static final org.apache.thrift.protocol.TField SDE_FIELD_DESC = new org.apache.thrift.protocol.TField("sde", org.apache.thrift.protocol.TType.STRUCT, (short)4); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new execute_prepared_cql_query_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new execute_prepared_cql_query_resultTupleSchemeFactory()); +- } +- +- public CqlResult success; // required +- public InvalidRequestException ire; // required +- public UnavailableException ue; // required +- public TimedOutException te; 
// required +- public SchemaDisagreementException sde; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"), +- UE((short)2, "ue"), +- TE((short)3, "te"), +- SDE((short)4, "sde"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- case 2: // UE +- return UE; +- case 3: // TE +- return TE; +- case 4: // SDE +- return SDE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CqlResult.class))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.UE, new org.apache.thrift.meta_data.FieldMetaData("ue", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.TE, new org.apache.thrift.meta_data.FieldMetaData("te", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.SDE, new org.apache.thrift.meta_data.FieldMetaData("sde", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(execute_prepared_cql_query_result.class, 
metaDataMap); +- } +- +- public execute_prepared_cql_query_result() { +- } +- +- public execute_prepared_cql_query_result( +- CqlResult success, +- InvalidRequestException ire, +- UnavailableException ue, +- TimedOutException te, +- SchemaDisagreementException sde) +- { +- this(); +- this.success = success; +- this.ire = ire; +- this.ue = ue; +- this.te = te; +- this.sde = sde; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public execute_prepared_cql_query_result(execute_prepared_cql_query_result other) { +- if (other.isSetSuccess()) { +- this.success = new CqlResult(other.success); +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- if (other.isSetUe()) { +- this.ue = new UnavailableException(other.ue); +- } +- if (other.isSetTe()) { +- this.te = new TimedOutException(other.te); +- } +- if (other.isSetSde()) { +- this.sde = new SchemaDisagreementException(other.sde); +- } +- } +- +- public execute_prepared_cql_query_result deepCopy() { +- return new execute_prepared_cql_query_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- this.ue = null; +- this.te = null; +- this.sde = null; +- } +- +- public CqlResult getSuccess() { +- return this.success; +- } +- +- public execute_prepared_cql_query_result setSuccess(CqlResult success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public execute_prepared_cql_query_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } 
+- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public UnavailableException getUe() { +- return this.ue; +- } +- +- public execute_prepared_cql_query_result setUe(UnavailableException ue) { +- this.ue = ue; +- return this; +- } +- +- public void unsetUe() { +- this.ue = null; +- } +- +- /** Returns true if field ue is set (has been assigned a value) and false otherwise */ +- public boolean isSetUe() { +- return this.ue != null; +- } +- +- public void setUeIsSet(boolean value) { +- if (!value) { +- this.ue = null; +- } +- } +- +- public TimedOutException getTe() { +- return this.te; +- } +- +- public execute_prepared_cql_query_result setTe(TimedOutException te) { +- this.te = te; +- return this; +- } +- +- public void unsetTe() { +- this.te = null; +- } +- +- /** Returns true if field te is set (has been assigned a value) and false otherwise */ +- public boolean isSetTe() { +- return this.te != null; +- } +- +- public void setTeIsSet(boolean value) { +- if (!value) { +- this.te = null; +- } +- } +- +- public SchemaDisagreementException getSde() { +- return this.sde; +- } +- +- public execute_prepared_cql_query_result setSde(SchemaDisagreementException sde) { +- this.sde = sde; +- return this; +- } +- +- public void unsetSde() { +- this.sde = null; +- } +- +- /** Returns true if field sde is set (has been assigned a value) and false otherwise */ +- public boolean isSetSde() { +- return this.sde != null; +- } +- +- public void setSdeIsSet(boolean value) { +- if (!value) { +- this.sde = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((CqlResult)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } 
else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- case UE: +- if (value == null) { +- unsetUe(); +- } else { +- setUe((UnavailableException)value); +- } +- break; +- +- case TE: +- if (value == null) { +- unsetTe(); +- } else { +- setTe((TimedOutException)value); +- } +- break; +- +- case SDE: +- if (value == null) { +- unsetSde(); +- } else { +- setSde((SchemaDisagreementException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- case UE: +- return getUe(); +- +- case TE: +- return getTe(); +- +- case SDE: +- return getSde(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- case UE: +- return isSetUe(); +- case TE: +- return isSetTe(); +- case SDE: +- return isSetSde(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof execute_prepared_cql_query_result) +- return this.equals((execute_prepared_cql_query_result)that); +- return false; +- } +- +- public boolean equals(execute_prepared_cql_query_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && 
that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- boolean this_present_ue = true && this.isSetUe(); +- boolean that_present_ue = true && that.isSetUe(); +- if (this_present_ue || that_present_ue) { +- if (!(this_present_ue && that_present_ue)) +- return false; +- if (!this.ue.equals(that.ue)) +- return false; +- } +- +- boolean this_present_te = true && this.isSetTe(); +- boolean that_present_te = true && that.isSetTe(); +- if (this_present_te || that_present_te) { +- if (!(this_present_te && that_present_te)) +- return false; +- if (!this.te.equals(that.te)) +- return false; +- } +- +- boolean this_present_sde = true && this.isSetSde(); +- boolean that_present_sde = true && that.isSetSde(); +- if (this_present_sde || that_present_sde) { +- if (!(this_present_sde && that_present_sde)) +- return false; +- if (!this.sde.equals(that.sde)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- boolean present_ue = true && (isSetUe()); +- builder.append(present_ue); +- if (present_ue) +- builder.append(ue); +- +- boolean present_te = true && (isSetTe()); +- builder.append(present_te); +- if (present_te) +- builder.append(te); +- +- boolean present_sde = true && (isSetSde()); +- builder.append(present_sde); +- if (present_sde) +- builder.append(sde); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(execute_prepared_cql_query_result other) { +- if (!getClass().equals(other.getClass())) { +- return 
getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetUe()).compareTo(other.isSetUe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetUe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ue, other.ue); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetTe()).compareTo(other.isSetTe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetTe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.te, other.te); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetSde()).compareTo(other.isSetSde()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSde()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sde, other.sde); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) 
throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("execute_prepared_cql_query_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ue:"); +- if (this.ue == null) { +- sb.append("null"); +- } else { +- sb.append(this.ue); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("te:"); +- if (this.te == null) { +- sb.append("null"); +- } else { +- sb.append(this.te); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("sde:"); +- if (this.sde == null) { +- sb.append("null"); +- } else { +- sb.append(this.sde); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- if (success != null) { +- success.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class 
execute_prepared_cql_query_resultStandardSchemeFactory implements SchemeFactory { +- public execute_prepared_cql_query_resultStandardScheme getScheme() { +- return new execute_prepared_cql_query_resultStandardScheme(); +- } +- } +- +- private static class execute_prepared_cql_query_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, execute_prepared_cql_query_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.success = new CqlResult(); +- struct.success.read(iprot); +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // UE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // TE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // SDE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.sde = new 
SchemaDisagreementException(); +- struct.sde.read(iprot); +- struct.setSdeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, execute_prepared_cql_query_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- struct.success.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ue != null) { +- oprot.writeFieldBegin(UE_FIELD_DESC); +- struct.ue.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.te != null) { +- oprot.writeFieldBegin(TE_FIELD_DESC); +- struct.te.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.sde != null) { +- oprot.writeFieldBegin(SDE_FIELD_DESC); +- struct.sde.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class execute_prepared_cql_query_resultTupleSchemeFactory implements SchemeFactory { +- public execute_prepared_cql_query_resultTupleScheme getScheme() { +- return new execute_prepared_cql_query_resultTupleScheme(); +- } +- } +- +- private static class execute_prepared_cql_query_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, execute_prepared_cql_query_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if 
(struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetIre()) { +- optionals.set(1); +- } +- if (struct.isSetUe()) { +- optionals.set(2); +- } +- if (struct.isSetTe()) { +- optionals.set(3); +- } +- if (struct.isSetSde()) { +- optionals.set(4); +- } +- oprot.writeBitSet(optionals, 5); +- if (struct.isSetSuccess()) { +- struct.success.write(oprot); +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- if (struct.isSetUe()) { +- struct.ue.write(oprot); +- } +- if (struct.isSetTe()) { +- struct.te.write(oprot); +- } +- if (struct.isSetSde()) { +- struct.sde.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, execute_prepared_cql_query_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(5); +- if (incoming.get(0)) { +- struct.success = new CqlResult(); +- struct.success.read(iprot); +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- if (incoming.get(2)) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } +- if (incoming.get(3)) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } +- if (incoming.get(4)) { +- struct.sde = new SchemaDisagreementException(); +- struct.sde.read(iprot); +- struct.setSdeIsSet(true); +- } +- } +- } +- +- } +- +- public static class execute_prepared_cql3_query_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("execute_prepared_cql3_query_args"); +- +- private static final org.apache.thrift.protocol.TField ITEM_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("itemId", org.apache.thrift.protocol.TType.I32, 
(short)1); +- private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST, (short)2); +- private static final org.apache.thrift.protocol.TField CONSISTENCY_FIELD_DESC = new org.apache.thrift.protocol.TField("consistency", org.apache.thrift.protocol.TType.I32, (short)3); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new execute_prepared_cql3_query_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new execute_prepared_cql3_query_argsTupleSchemeFactory()); +- } +- +- public int itemId; // required +- public List values; // required +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel consistency; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- ITEM_ID((short)1, "itemId"), +- VALUES((short)2, "values"), +- /** +- * +- * @see ConsistencyLevel +- */ +- CONSISTENCY((short)3, "consistency"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // ITEM_ID +- return ITEM_ID; +- case 2: // VALUES +- return VALUES; +- case 3: // CONSISTENCY +- return CONSISTENCY; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. 
+- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private static final int __ITEMID_ISSET_ID = 0; +- private byte __isset_bitfield = 0; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.ITEM_ID, new org.apache.thrift.meta_data.FieldMetaData("itemId", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- tmpMap.put(_Fields.VALUES, new org.apache.thrift.meta_data.FieldMetaData("values", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true)))); +- tmpMap.put(_Fields.CONSISTENCY, new org.apache.thrift.meta_data.FieldMetaData("consistency", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ConsistencyLevel.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(execute_prepared_cql3_query_args.class, metaDataMap); +- } +- +- public execute_prepared_cql3_query_args() { +- } +- +- public execute_prepared_cql3_query_args( +- int itemId, +- List values, +- ConsistencyLevel consistency) +- { +- this(); +- this.itemId = itemId; +- setItemIdIsSet(true); +- this.values = values; +- this.consistency = consistency; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public execute_prepared_cql3_query_args(execute_prepared_cql3_query_args other) { +- __isset_bitfield = other.__isset_bitfield; +- this.itemId = other.itemId; +- if (other.isSetValues()) { +- List __this__values = new ArrayList(other.values); +- this.values = __this__values; +- } +- if (other.isSetConsistency()) { +- this.consistency = other.consistency; +- } +- } +- +- public execute_prepared_cql3_query_args deepCopy() { +- return new execute_prepared_cql3_query_args(this); +- } +- +- @Override +- public void clear() { +- setItemIdIsSet(false); +- this.itemId = 0; +- this.values = null; +- this.consistency = null; +- } +- +- public int getItemId() { +- return this.itemId; +- } +- +- public execute_prepared_cql3_query_args setItemId(int itemId) { +- this.itemId = itemId; +- setItemIdIsSet(true); +- return this; +- } +- +- public void unsetItemId() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ITEMID_ISSET_ID); +- } +- +- /** Returns true if field itemId is set (has been assigned a value) and false otherwise */ +- public boolean isSetItemId() { +- return EncodingUtils.testBit(__isset_bitfield, __ITEMID_ISSET_ID); +- } +- +- public void setItemIdIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ITEMID_ISSET_ID, value); +- } +- +- public int getValuesSize() { +- return (this.values == null) ? 0 : this.values.size(); +- } +- +- public java.util.Iterator getValuesIterator() { +- return (this.values == null) ? 
null : this.values.iterator(); +- } +- +- public void addToValues(ByteBuffer elem) { +- if (this.values == null) { +- this.values = new ArrayList(); +- } +- this.values.add(elem); +- } +- +- public List getValues() { +- return this.values; +- } +- +- public execute_prepared_cql3_query_args setValues(List values) { +- this.values = values; +- return this; +- } +- +- public void unsetValues() { +- this.values = null; +- } +- +- /** Returns true if field values is set (has been assigned a value) and false otherwise */ +- public boolean isSetValues() { +- return this.values != null; +- } +- +- public void setValuesIsSet(boolean value) { +- if (!value) { +- this.values = null; +- } +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel getConsistency() { +- return this.consistency; +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public execute_prepared_cql3_query_args setConsistency(ConsistencyLevel consistency) { +- this.consistency = consistency; +- return this; +- } +- +- public void unsetConsistency() { +- this.consistency = null; +- } +- +- /** Returns true if field consistency is set (has been assigned a value) and false otherwise */ +- public boolean isSetConsistency() { +- return this.consistency != null; +- } +- +- public void setConsistencyIsSet(boolean value) { +- if (!value) { +- this.consistency = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case ITEM_ID: +- if (value == null) { +- unsetItemId(); +- } else { +- setItemId((Integer)value); +- } +- break; +- +- case VALUES: +- if (value == null) { +- unsetValues(); +- } else { +- setValues((List)value); +- } +- break; +- +- case CONSISTENCY: +- if (value == null) { +- unsetConsistency(); +- } else { +- setConsistency((ConsistencyLevel)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case ITEM_ID: +- return Integer.valueOf(getItemId()); +- +- case VALUES: +- 
return getValues(); +- +- case CONSISTENCY: +- return getConsistency(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case ITEM_ID: +- return isSetItemId(); +- case VALUES: +- return isSetValues(); +- case CONSISTENCY: +- return isSetConsistency(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof execute_prepared_cql3_query_args) +- return this.equals((execute_prepared_cql3_query_args)that); +- return false; +- } +- +- public boolean equals(execute_prepared_cql3_query_args that) { +- if (that == null) +- return false; +- +- boolean this_present_itemId = true; +- boolean that_present_itemId = true; +- if (this_present_itemId || that_present_itemId) { +- if (!(this_present_itemId && that_present_itemId)) +- return false; +- if (this.itemId != that.itemId) +- return false; +- } +- +- boolean this_present_values = true && this.isSetValues(); +- boolean that_present_values = true && that.isSetValues(); +- if (this_present_values || that_present_values) { +- if (!(this_present_values && that_present_values)) +- return false; +- if (!this.values.equals(that.values)) +- return false; +- } +- +- boolean this_present_consistency = true && this.isSetConsistency(); +- boolean that_present_consistency = true && that.isSetConsistency(); +- if (this_present_consistency || that_present_consistency) { +- if (!(this_present_consistency && that_present_consistency)) +- return false; +- if (!this.consistency.equals(that.consistency)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_itemId = true; +- 
builder.append(present_itemId); +- if (present_itemId) +- builder.append(itemId); +- +- boolean present_values = true && (isSetValues()); +- builder.append(present_values); +- if (present_values) +- builder.append(values); +- +- boolean present_consistency = true && (isSetConsistency()); +- builder.append(present_consistency); +- if (present_consistency) +- builder.append(consistency.getValue()); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(execute_prepared_cql3_query_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetItemId()).compareTo(other.isSetItemId()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetItemId()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.itemId, other.itemId); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetValues()).compareTo(other.isSetValues()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetValues()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.values, other.values); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetConsistency()).compareTo(other.isSetConsistency()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetConsistency()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.consistency, other.consistency); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void 
write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("execute_prepared_cql3_query_args("); +- boolean first = true; +- +- sb.append("itemId:"); +- sb.append(this.itemId); +- first = false; +- if (!first) sb.append(", "); +- sb.append("values:"); +- if (this.values == null) { +- sb.append("null"); +- } else { +- sb.append(this.values); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("consistency:"); +- if (this.consistency == null) { +- sb.append("null"); +- } else { +- sb.append(this.consistency); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // alas, we cannot check 'itemId' because it's a primitive and you chose the non-beans generator. +- if (values == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'values' was not present! Struct: " + toString()); +- } +- if (consistency == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'consistency' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+- __isset_bitfield = 0; +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class execute_prepared_cql3_query_argsStandardSchemeFactory implements SchemeFactory { +- public execute_prepared_cql3_query_argsStandardScheme getScheme() { +- return new execute_prepared_cql3_query_argsStandardScheme(); +- } +- } +- +- private static class execute_prepared_cql3_query_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, execute_prepared_cql3_query_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // ITEM_ID +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.itemId = iprot.readI32(); +- struct.setItemIdIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // VALUES +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list456 = iprot.readListBegin(); +- struct.values = new ArrayList(_list456.size); +- for (int _i457 = 0; _i457 < _list456.size; ++_i457) +- { +- ByteBuffer _elem458; +- _elem458 = iprot.readBinary(); +- struct.values.add(_elem458); +- } +- iprot.readListEnd(); +- } +- struct.setValuesIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // CONSISTENCY +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.consistency = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistencyIsSet(true); +- } else { +- 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- if (!struct.isSetItemId()) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'itemId' was not found in serialized data! Struct: " + toString()); +- } +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, execute_prepared_cql3_query_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- oprot.writeFieldBegin(ITEM_ID_FIELD_DESC); +- oprot.writeI32(struct.itemId); +- oprot.writeFieldEnd(); +- if (struct.values != null) { +- oprot.writeFieldBegin(VALUES_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.values.size())); +- for (ByteBuffer _iter459 : struct.values) +- { +- oprot.writeBinary(_iter459); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- if (struct.consistency != null) { +- oprot.writeFieldBegin(CONSISTENCY_FIELD_DESC); +- oprot.writeI32(struct.consistency.getValue()); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class execute_prepared_cql3_query_argsTupleSchemeFactory implements SchemeFactory { +- public execute_prepared_cql3_query_argsTupleScheme getScheme() { +- return new execute_prepared_cql3_query_argsTupleScheme(); +- } +- } +- +- private static class execute_prepared_cql3_query_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, execute_prepared_cql3_query_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- 
oprot.writeI32(struct.itemId); +- { +- oprot.writeI32(struct.values.size()); +- for (ByteBuffer _iter460 : struct.values) +- { +- oprot.writeBinary(_iter460); +- } +- } +- oprot.writeI32(struct.consistency.getValue()); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, execute_prepared_cql3_query_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.itemId = iprot.readI32(); +- struct.setItemIdIsSet(true); +- { +- org.apache.thrift.protocol.TList _list461 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); +- struct.values = new ArrayList(_list461.size); +- for (int _i462 = 0; _i462 < _list461.size; ++_i462) +- { +- ByteBuffer _elem463; +- _elem463 = iprot.readBinary(); +- struct.values.add(_elem463); +- } +- } +- struct.setValuesIsSet(true); +- struct.consistency = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistencyIsSet(true); +- } +- } +- +- } +- +- public static class execute_prepared_cql3_query_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("execute_prepared_cql3_query_result"); +- +- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField UE_FIELD_DESC = new org.apache.thrift.protocol.TField("ue", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField TE_FIELD_DESC = new org.apache.thrift.protocol.TField("te", org.apache.thrift.protocol.TType.STRUCT, 
(short)3); +- private static final org.apache.thrift.protocol.TField SDE_FIELD_DESC = new org.apache.thrift.protocol.TField("sde", org.apache.thrift.protocol.TType.STRUCT, (short)4); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new execute_prepared_cql3_query_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new execute_prepared_cql3_query_resultTupleSchemeFactory()); +- } +- +- public CqlResult success; // required +- public InvalidRequestException ire; // required +- public UnavailableException ue; // required +- public TimedOutException te; // required +- public SchemaDisagreementException sde; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- SUCCESS((short)0, "success"), +- IRE((short)1, "ire"), +- UE((short)2, "ue"), +- TE((short)3, "te"), +- SDE((short)4, "sde"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 0: // SUCCESS +- return SUCCESS; +- case 1: // IRE +- return IRE; +- case 2: // UE +- return UE; +- case 3: // TE +- return TE; +- case 4: // SDE +- return SDE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. 
+- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CqlResult.class))); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.UE, new org.apache.thrift.meta_data.FieldMetaData("ue", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.TE, new org.apache.thrift.meta_data.FieldMetaData("te", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- tmpMap.put(_Fields.SDE, new 
org.apache.thrift.meta_data.FieldMetaData("sde", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(execute_prepared_cql3_query_result.class, metaDataMap); +- } +- +- public execute_prepared_cql3_query_result() { +- } +- +- public execute_prepared_cql3_query_result( +- CqlResult success, +- InvalidRequestException ire, +- UnavailableException ue, +- TimedOutException te, +- SchemaDisagreementException sde) +- { +- this(); +- this.success = success; +- this.ire = ire; +- this.ue = ue; +- this.te = te; +- this.sde = sde; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public execute_prepared_cql3_query_result(execute_prepared_cql3_query_result other) { +- if (other.isSetSuccess()) { +- this.success = new CqlResult(other.success); +- } +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- if (other.isSetUe()) { +- this.ue = new UnavailableException(other.ue); +- } +- if (other.isSetTe()) { +- this.te = new TimedOutException(other.te); +- } +- if (other.isSetSde()) { +- this.sde = new SchemaDisagreementException(other.sde); +- } +- } +- +- public execute_prepared_cql3_query_result deepCopy() { +- return new execute_prepared_cql3_query_result(this); +- } +- +- @Override +- public void clear() { +- this.success = null; +- this.ire = null; +- this.ue = null; +- this.te = null; +- this.sde = null; +- } +- +- public CqlResult getSuccess() { +- return this.success; +- } +- +- public execute_prepared_cql3_query_result setSuccess(CqlResult success) { +- this.success = success; +- return this; +- } +- +- public void unsetSuccess() { +- this.success = null; +- } +- +- /** Returns true if field success is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuccess() { +- return this.success != 
null; +- } +- +- public void setSuccessIsSet(boolean value) { +- if (!value) { +- this.success = null; +- } +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public execute_prepared_cql3_query_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public UnavailableException getUe() { +- return this.ue; +- } +- +- public execute_prepared_cql3_query_result setUe(UnavailableException ue) { +- this.ue = ue; +- return this; +- } +- +- public void unsetUe() { +- this.ue = null; +- } +- +- /** Returns true if field ue is set (has been assigned a value) and false otherwise */ +- public boolean isSetUe() { +- return this.ue != null; +- } +- +- public void setUeIsSet(boolean value) { +- if (!value) { +- this.ue = null; +- } +- } +- +- public TimedOutException getTe() { +- return this.te; +- } +- +- public execute_prepared_cql3_query_result setTe(TimedOutException te) { +- this.te = te; +- return this; +- } +- +- public void unsetTe() { +- this.te = null; +- } +- +- /** Returns true if field te is set (has been assigned a value) and false otherwise */ +- public boolean isSetTe() { +- return this.te != null; +- } +- +- public void setTeIsSet(boolean value) { +- if (!value) { +- this.te = null; +- } +- } +- +- public SchemaDisagreementException getSde() { +- return this.sde; +- } +- +- public execute_prepared_cql3_query_result setSde(SchemaDisagreementException sde) { +- this.sde = sde; +- return this; +- } +- +- public void unsetSde() { +- this.sde = null; +- } +- +- /** Returns true if field sde is set (has been assigned a value) and false otherwise */ +- public boolean isSetSde() { +- return this.sde != 
null; +- } +- +- public void setSdeIsSet(boolean value) { +- if (!value) { +- this.sde = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case SUCCESS: +- if (value == null) { +- unsetSuccess(); +- } else { +- setSuccess((CqlResult)value); +- } +- break; +- +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- case UE: +- if (value == null) { +- unsetUe(); +- } else { +- setUe((UnavailableException)value); +- } +- break; +- +- case TE: +- if (value == null) { +- unsetTe(); +- } else { +- setTe((TimedOutException)value); +- } +- break; +- +- case SDE: +- if (value == null) { +- unsetSde(); +- } else { +- setSde((SchemaDisagreementException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case SUCCESS: +- return getSuccess(); +- +- case IRE: +- return getIre(); +- +- case UE: +- return getUe(); +- +- case TE: +- return getTe(); +- +- case SDE: +- return getSde(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case SUCCESS: +- return isSetSuccess(); +- case IRE: +- return isSetIre(); +- case UE: +- return isSetUe(); +- case TE: +- return isSetTe(); +- case SDE: +- return isSetSde(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof execute_prepared_cql3_query_result) +- return this.equals((execute_prepared_cql3_query_result)that); +- return false; +- } +- +- public boolean equals(execute_prepared_cql3_query_result that) { +- if (that == null) +- return false; +- +- boolean this_present_success = true && 
this.isSetSuccess(); +- boolean that_present_success = true && that.isSetSuccess(); +- if (this_present_success || that_present_success) { +- if (!(this_present_success && that_present_success)) +- return false; +- if (!this.success.equals(that.success)) +- return false; +- } +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { +- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- boolean this_present_ue = true && this.isSetUe(); +- boolean that_present_ue = true && that.isSetUe(); +- if (this_present_ue || that_present_ue) { +- if (!(this_present_ue && that_present_ue)) +- return false; +- if (!this.ue.equals(that.ue)) +- return false; +- } +- +- boolean this_present_te = true && this.isSetTe(); +- boolean that_present_te = true && that.isSetTe(); +- if (this_present_te || that_present_te) { +- if (!(this_present_te && that_present_te)) +- return false; +- if (!this.te.equals(that.te)) +- return false; +- } +- +- boolean this_present_sde = true && this.isSetSde(); +- boolean that_present_sde = true && that.isSetSde(); +- if (this_present_sde || that_present_sde) { +- if (!(this_present_sde && that_present_sde)) +- return false; +- if (!this.sde.equals(that.sde)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_success = true && (isSetSuccess()); +- builder.append(present_success); +- if (present_success) +- builder.append(success); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- boolean present_ue = true && (isSetUe()); +- builder.append(present_ue); +- if (present_ue) +- builder.append(ue); +- +- boolean present_te = true && (isSetTe()); +- builder.append(present_te); +- if (present_te) +- 
builder.append(te); +- +- boolean present_sde = true && (isSetSde()); +- builder.append(present_sde); +- if (present_sde) +- builder.append(sde); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(execute_prepared_cql3_query_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuccess()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetUe()).compareTo(other.isSetUe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetUe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ue, other.ue); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetTe()).compareTo(other.isSetTe()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetTe()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.te, other.te); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetSde()).compareTo(other.isSetSde()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSde()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sde, other.sde); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- 
public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("execute_prepared_cql3_query_result("); +- boolean first = true; +- +- sb.append("success:"); +- if (this.success == null) { +- sb.append("null"); +- } else { +- sb.append(this.success); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("ue:"); +- if (this.ue == null) { +- sb.append("null"); +- } else { +- sb.append(this.ue); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("te:"); +- if (this.te == null) { +- sb.append("null"); +- } else { +- sb.append(this.te); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("sde:"); +- if (this.sde == null) { +- sb.append("null"); +- } else { +- sb.append(this.sde); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- if (success != null) { +- success.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) 
throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class execute_prepared_cql3_query_resultStandardSchemeFactory implements SchemeFactory { +- public execute_prepared_cql3_query_resultStandardScheme getScheme() { +- return new execute_prepared_cql3_query_resultStandardScheme(); +- } +- } +- +- private static class execute_prepared_cql3_query_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, execute_prepared_cql3_query_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 0: // SUCCESS +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.success = new CqlResult(); +- struct.success.read(iprot); +- struct.setSuccessIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // UE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // TE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- 
struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // SDE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.sde = new SchemaDisagreementException(); +- struct.sde.read(iprot); +- struct.setSdeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, execute_prepared_cql3_query_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.success != null) { +- oprot.writeFieldBegin(SUCCESS_FIELD_DESC); +- struct.success.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.ue != null) { +- oprot.writeFieldBegin(UE_FIELD_DESC); +- struct.ue.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.te != null) { +- oprot.writeFieldBegin(TE_FIELD_DESC); +- struct.te.write(oprot); +- oprot.writeFieldEnd(); +- } +- if (struct.sde != null) { +- oprot.writeFieldBegin(SDE_FIELD_DESC); +- struct.sde.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class execute_prepared_cql3_query_resultTupleSchemeFactory implements SchemeFactory { +- public execute_prepared_cql3_query_resultTupleScheme getScheme() { +- return new execute_prepared_cql3_query_resultTupleScheme(); +- } +- } +- +- private static class 
execute_prepared_cql3_query_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, execute_prepared_cql3_query_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetSuccess()) { +- optionals.set(0); +- } +- if (struct.isSetIre()) { +- optionals.set(1); +- } +- if (struct.isSetUe()) { +- optionals.set(2); +- } +- if (struct.isSetTe()) { +- optionals.set(3); +- } +- if (struct.isSetSde()) { +- optionals.set(4); +- } +- oprot.writeBitSet(optionals, 5); +- if (struct.isSetSuccess()) { +- struct.success.write(oprot); +- } +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- if (struct.isSetUe()) { +- struct.ue.write(oprot); +- } +- if (struct.isSetTe()) { +- struct.te.write(oprot); +- } +- if (struct.isSetSde()) { +- struct.sde.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, execute_prepared_cql3_query_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(5); +- if (incoming.get(0)) { +- struct.success = new CqlResult(); +- struct.success.read(iprot); +- struct.setSuccessIsSet(true); +- } +- if (incoming.get(1)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- if (incoming.get(2)) { +- struct.ue = new UnavailableException(); +- struct.ue.read(iprot); +- struct.setUeIsSet(true); +- } +- if (incoming.get(3)) { +- struct.te = new TimedOutException(); +- struct.te.read(iprot); +- struct.setTeIsSet(true); +- } +- if (incoming.get(4)) { +- struct.sde = new SchemaDisagreementException(); +- struct.sde.read(iprot); +- struct.setSdeIsSet(true); +- } +- } +- } +- +- } +- +- public static class set_cql_version_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- 
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("set_cql_version_args"); +- +- private static final org.apache.thrift.protocol.TField VERSION_FIELD_DESC = new org.apache.thrift.protocol.TField("version", org.apache.thrift.protocol.TType.STRING, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new set_cql_version_argsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new set_cql_version_argsTupleSchemeFactory()); +- } +- +- public String version; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- VERSION((short)1, "version"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // VERSION +- return VERSION; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.VERSION, new org.apache.thrift.meta_data.FieldMetaData("version", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(set_cql_version_args.class, metaDataMap); +- } +- +- public set_cql_version_args() { +- } +- +- public set_cql_version_args( +- String version) +- { +- this(); +- this.version = version; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public set_cql_version_args(set_cql_version_args other) { +- if (other.isSetVersion()) { +- this.version = other.version; +- } +- } +- +- public set_cql_version_args deepCopy() { +- return new set_cql_version_args(this); +- } +- +- @Override +- public void clear() { +- this.version = null; +- } +- +- public String getVersion() { +- return this.version; +- } +- +- public set_cql_version_args setVersion(String version) { +- this.version = version; +- return this; +- } +- +- public void unsetVersion() { +- this.version = null; +- } +- +- /** Returns true if field version is set (has been assigned a value) and false otherwise */ +- public boolean isSetVersion() { +- return this.version != null; +- } +- +- public void setVersionIsSet(boolean value) { +- if (!value) { +- this.version = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case VERSION: +- if (value == null) { +- unsetVersion(); +- } else { +- setVersion((String)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case VERSION: +- return getVersion(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case VERSION: +- return isSetVersion(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof set_cql_version_args) +- return this.equals((set_cql_version_args)that); +- return false; +- } +- +- public boolean equals(set_cql_version_args that) { +- if (that == null) +- return false; +- +- boolean this_present_version = true && this.isSetVersion(); +- boolean that_present_version = true && that.isSetVersion(); +- if (this_present_version || 
that_present_version) { +- if (!(this_present_version && that_present_version)) +- return false; +- if (!this.version.equals(that.version)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_version = true && (isSetVersion()); +- builder.append(present_version); +- if (present_version) +- builder.append(version); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(set_cql_version_args other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetVersion()).compareTo(other.isSetVersion()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetVersion()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.version, other.version); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("set_cql_version_args("); +- boolean first = true; +- +- sb.append("version:"); +- if (this.version == null) { +- sb.append("null"); +- } else { +- sb.append(this.version); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (version == null) { +- throw new 
org.apache.thrift.protocol.TProtocolException("Required field 'version' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class set_cql_version_argsStandardSchemeFactory implements SchemeFactory { +- public set_cql_version_argsStandardScheme getScheme() { +- return new set_cql_version_argsStandardScheme(); +- } +- } +- +- private static class set_cql_version_argsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, set_cql_version_args struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // VERSION +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.version = iprot.readString(); +- struct.setVersionIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the 
validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, set_cql_version_args struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.version != null) { +- oprot.writeFieldBegin(VERSION_FIELD_DESC); +- oprot.writeString(struct.version); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class set_cql_version_argsTupleSchemeFactory implements SchemeFactory { +- public set_cql_version_argsTupleScheme getScheme() { +- return new set_cql_version_argsTupleScheme(); +- } +- } +- +- private static class set_cql_version_argsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, set_cql_version_args struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeString(struct.version); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, set_cql_version_args struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.version = iprot.readString(); +- struct.setVersionIsSet(true); +- } +- } +- +- } +- +- public static class set_cql_version_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("set_cql_version_result"); +- +- private static final org.apache.thrift.protocol.TField IRE_FIELD_DESC = new org.apache.thrift.protocol.TField("ire", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new set_cql_version_resultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new set_cql_version_resultTupleSchemeFactory()); +- } +- +- 
public InvalidRequestException ire; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- IRE((short)1, "ire"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // IRE +- return IRE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.IRE, new org.apache.thrift.meta_data.FieldMetaData("ire", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(set_cql_version_result.class, metaDataMap); +- } +- +- public set_cql_version_result() { +- } +- +- public set_cql_version_result( +- InvalidRequestException ire) +- { +- this(); +- this.ire = ire; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public set_cql_version_result(set_cql_version_result other) { +- if (other.isSetIre()) { +- this.ire = new InvalidRequestException(other.ire); +- } +- } +- +- public set_cql_version_result deepCopy() { +- return new set_cql_version_result(this); +- } +- +- @Override +- public void clear() { +- this.ire = null; +- } +- +- public InvalidRequestException getIre() { +- return this.ire; +- } +- +- public set_cql_version_result setIre(InvalidRequestException ire) { +- this.ire = ire; +- return this; +- } +- +- public void unsetIre() { +- this.ire = null; +- } +- +- /** Returns true if field ire is set (has been assigned a value) and false otherwise */ +- public boolean isSetIre() { +- return this.ire != null; +- } +- +- public void setIreIsSet(boolean value) { +- if (!value) { +- this.ire = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case IRE: +- if (value == null) { +- unsetIre(); +- } else { +- setIre((InvalidRequestException)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case IRE: +- return getIre(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case IRE: +- return isSetIre(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof set_cql_version_result) +- return this.equals((set_cql_version_result)that); +- return false; +- } +- +- public boolean equals(set_cql_version_result that) { +- if (that == null) +- return false; +- +- boolean this_present_ire = true && this.isSetIre(); +- boolean that_present_ire = true && that.isSetIre(); +- if (this_present_ire || that_present_ire) { 
+- if (!(this_present_ire && that_present_ire)) +- return false; +- if (!this.ire.equals(that.ire)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_ire = true && (isSetIre()); +- builder.append(present_ire); +- if (present_ire) +- builder.append(ire); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(set_cql_version_result other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetIre()).compareTo(other.isSetIre()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIre()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ire, other.ire); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("set_cql_version_result("); +- boolean first = true; +- +- sb.append("ire:"); +- if (this.ire == null) { +- sb.append("null"); +- } else { +- sb.append(this.ire); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new 
org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class set_cql_version_resultStandardSchemeFactory implements SchemeFactory { +- public set_cql_version_resultStandardScheme getScheme() { +- return new set_cql_version_resultStandardScheme(); +- } +- } +- +- private static class set_cql_version_resultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, set_cql_version_result struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // IRE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, set_cql_version_result struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); 
+- if (struct.ire != null) { +- oprot.writeFieldBegin(IRE_FIELD_DESC); +- struct.ire.write(oprot); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class set_cql_version_resultTupleSchemeFactory implements SchemeFactory { +- public set_cql_version_resultTupleScheme getScheme() { +- return new set_cql_version_resultTupleScheme(); +- } +- } +- +- private static class set_cql_version_resultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, set_cql_version_result struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetIre()) { +- optionals.set(0); +- } +- oprot.writeBitSet(optionals, 1); +- if (struct.isSetIre()) { +- struct.ire.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, set_cql_version_result struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(1); +- if (incoming.get(0)) { +- struct.ire = new InvalidRequestException(); +- struct.ire.read(iprot); +- struct.setIreIsSet(true); +- } +- } +- } +- +- } +- +-} +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/CfDef.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/CfDef.java +deleted file mode 100644 +index ec10050..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/CfDef.java ++++ /dev/null +@@ -1,4927 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. 
See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. +- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-public class CfDef implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CfDef"); +- +- private static final org.apache.thrift.protocol.TField KEYSPACE_FIELD_DESC = new 
org.apache.thrift.protocol.TField("keyspace", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)2); +- private static final org.apache.thrift.protocol.TField COLUMN_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("column_type", org.apache.thrift.protocol.TType.STRING, (short)3); +- private static final org.apache.thrift.protocol.TField COMPARATOR_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("comparator_type", org.apache.thrift.protocol.TType.STRING, (short)5); +- private static final org.apache.thrift.protocol.TField SUBCOMPARATOR_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("subcomparator_type", org.apache.thrift.protocol.TType.STRING, (short)6); +- private static final org.apache.thrift.protocol.TField COMMENT_FIELD_DESC = new org.apache.thrift.protocol.TField("comment", org.apache.thrift.protocol.TType.STRING, (short)8); +- private static final org.apache.thrift.protocol.TField READ_REPAIR_CHANCE_FIELD_DESC = new org.apache.thrift.protocol.TField("read_repair_chance", org.apache.thrift.protocol.TType.DOUBLE, (short)12); +- private static final org.apache.thrift.protocol.TField COLUMN_METADATA_FIELD_DESC = new org.apache.thrift.protocol.TField("column_metadata", org.apache.thrift.protocol.TType.LIST, (short)13); +- private static final org.apache.thrift.protocol.TField GC_GRACE_SECONDS_FIELD_DESC = new org.apache.thrift.protocol.TField("gc_grace_seconds", org.apache.thrift.protocol.TType.I32, (short)14); +- private static final org.apache.thrift.protocol.TField DEFAULT_VALIDATION_CLASS_FIELD_DESC = new org.apache.thrift.protocol.TField("default_validation_class", org.apache.thrift.protocol.TType.STRING, (short)15); +- private static final org.apache.thrift.protocol.TField ID_FIELD_DESC = new org.apache.thrift.protocol.TField("id", 
org.apache.thrift.protocol.TType.I32, (short)16); +- private static final org.apache.thrift.protocol.TField MIN_COMPACTION_THRESHOLD_FIELD_DESC = new org.apache.thrift.protocol.TField("min_compaction_threshold", org.apache.thrift.protocol.TType.I32, (short)17); +- private static final org.apache.thrift.protocol.TField MAX_COMPACTION_THRESHOLD_FIELD_DESC = new org.apache.thrift.protocol.TField("max_compaction_threshold", org.apache.thrift.protocol.TType.I32, (short)18); +- private static final org.apache.thrift.protocol.TField KEY_VALIDATION_CLASS_FIELD_DESC = new org.apache.thrift.protocol.TField("key_validation_class", org.apache.thrift.protocol.TType.STRING, (short)26); +- private static final org.apache.thrift.protocol.TField KEY_ALIAS_FIELD_DESC = new org.apache.thrift.protocol.TField("key_alias", org.apache.thrift.protocol.TType.STRING, (short)28); +- private static final org.apache.thrift.protocol.TField COMPACTION_STRATEGY_FIELD_DESC = new org.apache.thrift.protocol.TField("compaction_strategy", org.apache.thrift.protocol.TType.STRING, (short)29); +- private static final org.apache.thrift.protocol.TField COMPACTION_STRATEGY_OPTIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("compaction_strategy_options", org.apache.thrift.protocol.TType.MAP, (short)30); +- private static final org.apache.thrift.protocol.TField COMPRESSION_OPTIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("compression_options", org.apache.thrift.protocol.TType.MAP, (short)32); +- private static final org.apache.thrift.protocol.TField BLOOM_FILTER_FP_CHANCE_FIELD_DESC = new org.apache.thrift.protocol.TField("bloom_filter_fp_chance", org.apache.thrift.protocol.TType.DOUBLE, (short)33); +- private static final org.apache.thrift.protocol.TField CACHING_FIELD_DESC = new org.apache.thrift.protocol.TField("caching", org.apache.thrift.protocol.TType.STRING, (short)34); +- private static final org.apache.thrift.protocol.TField DCLOCAL_READ_REPAIR_CHANCE_FIELD_DESC = new 
org.apache.thrift.protocol.TField("dclocal_read_repair_chance", org.apache.thrift.protocol.TType.DOUBLE, (short)37); +- private static final org.apache.thrift.protocol.TField MEMTABLE_FLUSH_PERIOD_IN_MS_FIELD_DESC = new org.apache.thrift.protocol.TField("memtable_flush_period_in_ms", org.apache.thrift.protocol.TType.I32, (short)39); +- private static final org.apache.thrift.protocol.TField DEFAULT_TIME_TO_LIVE_FIELD_DESC = new org.apache.thrift.protocol.TField("default_time_to_live", org.apache.thrift.protocol.TType.I32, (short)40); +- private static final org.apache.thrift.protocol.TField SPECULATIVE_RETRY_FIELD_DESC = new org.apache.thrift.protocol.TField("speculative_retry", org.apache.thrift.protocol.TType.STRING, (short)42); +- private static final org.apache.thrift.protocol.TField TRIGGERS_FIELD_DESC = new org.apache.thrift.protocol.TField("triggers", org.apache.thrift.protocol.TType.LIST, (short)43); +- private static final org.apache.thrift.protocol.TField CELLS_PER_ROW_TO_CACHE_FIELD_DESC = new org.apache.thrift.protocol.TField("cells_per_row_to_cache", org.apache.thrift.protocol.TType.STRING, (short)44); +- private static final org.apache.thrift.protocol.TField MIN_INDEX_INTERVAL_FIELD_DESC = new org.apache.thrift.protocol.TField("min_index_interval", org.apache.thrift.protocol.TType.I32, (short)45); +- private static final org.apache.thrift.protocol.TField MAX_INDEX_INTERVAL_FIELD_DESC = new org.apache.thrift.protocol.TField("max_index_interval", org.apache.thrift.protocol.TType.I32, (short)46); +- private static final org.apache.thrift.protocol.TField ROW_CACHE_SIZE_FIELD_DESC = new org.apache.thrift.protocol.TField("row_cache_size", org.apache.thrift.protocol.TType.DOUBLE, (short)9); +- private static final org.apache.thrift.protocol.TField KEY_CACHE_SIZE_FIELD_DESC = new org.apache.thrift.protocol.TField("key_cache_size", org.apache.thrift.protocol.TType.DOUBLE, (short)11); +- private static final org.apache.thrift.protocol.TField 
ROW_CACHE_SAVE_PERIOD_IN_SECONDS_FIELD_DESC = new org.apache.thrift.protocol.TField("row_cache_save_period_in_seconds", org.apache.thrift.protocol.TType.I32, (short)19); +- private static final org.apache.thrift.protocol.TField KEY_CACHE_SAVE_PERIOD_IN_SECONDS_FIELD_DESC = new org.apache.thrift.protocol.TField("key_cache_save_period_in_seconds", org.apache.thrift.protocol.TType.I32, (short)20); +- private static final org.apache.thrift.protocol.TField MEMTABLE_FLUSH_AFTER_MINS_FIELD_DESC = new org.apache.thrift.protocol.TField("memtable_flush_after_mins", org.apache.thrift.protocol.TType.I32, (short)21); +- private static final org.apache.thrift.protocol.TField MEMTABLE_THROUGHPUT_IN_MB_FIELD_DESC = new org.apache.thrift.protocol.TField("memtable_throughput_in_mb", org.apache.thrift.protocol.TType.I32, (short)22); +- private static final org.apache.thrift.protocol.TField MEMTABLE_OPERATIONS_IN_MILLIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("memtable_operations_in_millions", org.apache.thrift.protocol.TType.DOUBLE, (short)23); +- private static final org.apache.thrift.protocol.TField REPLICATE_ON_WRITE_FIELD_DESC = new org.apache.thrift.protocol.TField("replicate_on_write", org.apache.thrift.protocol.TType.BOOL, (short)24); +- private static final org.apache.thrift.protocol.TField MERGE_SHARDS_CHANCE_FIELD_DESC = new org.apache.thrift.protocol.TField("merge_shards_chance", org.apache.thrift.protocol.TType.DOUBLE, (short)25); +- private static final org.apache.thrift.protocol.TField ROW_CACHE_PROVIDER_FIELD_DESC = new org.apache.thrift.protocol.TField("row_cache_provider", org.apache.thrift.protocol.TType.STRING, (short)27); +- private static final org.apache.thrift.protocol.TField ROW_CACHE_KEYS_TO_SAVE_FIELD_DESC = new org.apache.thrift.protocol.TField("row_cache_keys_to_save", org.apache.thrift.protocol.TType.I32, (short)31); +- private static final org.apache.thrift.protocol.TField POPULATE_IO_CACHE_ON_FLUSH_FIELD_DESC = new 
org.apache.thrift.protocol.TField("populate_io_cache_on_flush", org.apache.thrift.protocol.TType.BOOL, (short)38); +- private static final org.apache.thrift.protocol.TField INDEX_INTERVAL_FIELD_DESC = new org.apache.thrift.protocol.TField("index_interval", org.apache.thrift.protocol.TType.I32, (short)41); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new CfDefStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new CfDefTupleSchemeFactory()); +- } +- +- public String keyspace; // required +- public String name; // required +- public String column_type; // optional +- public String comparator_type; // optional +- public String subcomparator_type; // optional +- public String comment; // optional +- public double read_repair_chance; // optional +- public List column_metadata; // optional +- public int gc_grace_seconds; // optional +- public String default_validation_class; // optional +- public int id; // optional +- public int min_compaction_threshold; // optional +- public int max_compaction_threshold; // optional +- public String key_validation_class; // optional +- public ByteBuffer key_alias; // optional +- public String compaction_strategy; // optional +- public Map compaction_strategy_options; // optional +- public Map compression_options; // optional +- public double bloom_filter_fp_chance; // optional +- public String caching; // optional +- public double dclocal_read_repair_chance; // optional +- public int memtable_flush_period_in_ms; // optional +- public int default_time_to_live; // optional +- public String speculative_retry; // optional +- public List triggers; // optional +- public String cells_per_row_to_cache; // optional +- public int min_index_interval; // optional +- public int max_index_interval; // optional +- /** +- * @deprecated +- */ +- public double row_cache_size; // optional +- /** +- * @deprecated +- */ +- public double key_cache_size; // 
optional +- /** +- * @deprecated +- */ +- public int row_cache_save_period_in_seconds; // optional +- /** +- * @deprecated +- */ +- public int key_cache_save_period_in_seconds; // optional +- /** +- * @deprecated +- */ +- public int memtable_flush_after_mins; // optional +- /** +- * @deprecated +- */ +- public int memtable_throughput_in_mb; // optional +- /** +- * @deprecated +- */ +- public double memtable_operations_in_millions; // optional +- /** +- * @deprecated +- */ +- public boolean replicate_on_write; // optional +- /** +- * @deprecated +- */ +- public double merge_shards_chance; // optional +- /** +- * @deprecated +- */ +- public String row_cache_provider; // optional +- /** +- * @deprecated +- */ +- public int row_cache_keys_to_save; // optional +- /** +- * @deprecated +- */ +- public boolean populate_io_cache_on_flush; // optional +- /** +- * @deprecated +- */ +- public int index_interval; // optional +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- KEYSPACE((short)1, "keyspace"), +- NAME((short)2, "name"), +- COLUMN_TYPE((short)3, "column_type"), +- COMPARATOR_TYPE((short)5, "comparator_type"), +- SUBCOMPARATOR_TYPE((short)6, "subcomparator_type"), +- COMMENT((short)8, "comment"), +- READ_REPAIR_CHANCE((short)12, "read_repair_chance"), +- COLUMN_METADATA((short)13, "column_metadata"), +- GC_GRACE_SECONDS((short)14, "gc_grace_seconds"), +- DEFAULT_VALIDATION_CLASS((short)15, "default_validation_class"), +- ID((short)16, "id"), +- MIN_COMPACTION_THRESHOLD((short)17, "min_compaction_threshold"), +- MAX_COMPACTION_THRESHOLD((short)18, "max_compaction_threshold"), +- KEY_VALIDATION_CLASS((short)26, "key_validation_class"), +- KEY_ALIAS((short)28, "key_alias"), +- COMPACTION_STRATEGY((short)29, "compaction_strategy"), +- COMPACTION_STRATEGY_OPTIONS((short)30, "compaction_strategy_options"), +- COMPRESSION_OPTIONS((short)32, "compression_options"), +- BLOOM_FILTER_FP_CHANCE((short)33, "bloom_filter_fp_chance"), +- CACHING((short)34, "caching"), +- DCLOCAL_READ_REPAIR_CHANCE((short)37, "dclocal_read_repair_chance"), +- MEMTABLE_FLUSH_PERIOD_IN_MS((short)39, "memtable_flush_period_in_ms"), +- DEFAULT_TIME_TO_LIVE((short)40, "default_time_to_live"), +- SPECULATIVE_RETRY((short)42, "speculative_retry"), +- TRIGGERS((short)43, "triggers"), +- CELLS_PER_ROW_TO_CACHE((short)44, "cells_per_row_to_cache"), +- MIN_INDEX_INTERVAL((short)45, "min_index_interval"), +- MAX_INDEX_INTERVAL((short)46, "max_index_interval"), +- /** +- * @deprecated +- */ +- ROW_CACHE_SIZE((short)9, "row_cache_size"), +- /** +- * @deprecated +- */ +- KEY_CACHE_SIZE((short)11, "key_cache_size"), +- /** +- * @deprecated +- */ +- ROW_CACHE_SAVE_PERIOD_IN_SECONDS((short)19, "row_cache_save_period_in_seconds"), +- /** +- * @deprecated +- */ +- KEY_CACHE_SAVE_PERIOD_IN_SECONDS((short)20, "key_cache_save_period_in_seconds"), +- /** +- * @deprecated +- */ +- 
MEMTABLE_FLUSH_AFTER_MINS((short)21, "memtable_flush_after_mins"), +- /** +- * @deprecated +- */ +- MEMTABLE_THROUGHPUT_IN_MB((short)22, "memtable_throughput_in_mb"), +- /** +- * @deprecated +- */ +- MEMTABLE_OPERATIONS_IN_MILLIONS((short)23, "memtable_operations_in_millions"), +- /** +- * @deprecated +- */ +- REPLICATE_ON_WRITE((short)24, "replicate_on_write"), +- /** +- * @deprecated +- */ +- MERGE_SHARDS_CHANCE((short)25, "merge_shards_chance"), +- /** +- * @deprecated +- */ +- ROW_CACHE_PROVIDER((short)27, "row_cache_provider"), +- /** +- * @deprecated +- */ +- ROW_CACHE_KEYS_TO_SAVE((short)31, "row_cache_keys_to_save"), +- /** +- * @deprecated +- */ +- POPULATE_IO_CACHE_ON_FLUSH((short)38, "populate_io_cache_on_flush"), +- /** +- * @deprecated +- */ +- INDEX_INTERVAL((short)41, "index_interval"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. 
+- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // KEYSPACE +- return KEYSPACE; +- case 2: // NAME +- return NAME; +- case 3: // COLUMN_TYPE +- return COLUMN_TYPE; +- case 5: // COMPARATOR_TYPE +- return COMPARATOR_TYPE; +- case 6: // SUBCOMPARATOR_TYPE +- return SUBCOMPARATOR_TYPE; +- case 8: // COMMENT +- return COMMENT; +- case 12: // READ_REPAIR_CHANCE +- return READ_REPAIR_CHANCE; +- case 13: // COLUMN_METADATA +- return COLUMN_METADATA; +- case 14: // GC_GRACE_SECONDS +- return GC_GRACE_SECONDS; +- case 15: // DEFAULT_VALIDATION_CLASS +- return DEFAULT_VALIDATION_CLASS; +- case 16: // ID +- return ID; +- case 17: // MIN_COMPACTION_THRESHOLD +- return MIN_COMPACTION_THRESHOLD; +- case 18: // MAX_COMPACTION_THRESHOLD +- return MAX_COMPACTION_THRESHOLD; +- case 26: // KEY_VALIDATION_CLASS +- return KEY_VALIDATION_CLASS; +- case 28: // KEY_ALIAS +- return KEY_ALIAS; +- case 29: // COMPACTION_STRATEGY +- return COMPACTION_STRATEGY; +- case 30: // COMPACTION_STRATEGY_OPTIONS +- return COMPACTION_STRATEGY_OPTIONS; +- case 32: // COMPRESSION_OPTIONS +- return COMPRESSION_OPTIONS; +- case 33: // BLOOM_FILTER_FP_CHANCE +- return BLOOM_FILTER_FP_CHANCE; +- case 34: // CACHING +- return CACHING; +- case 37: // DCLOCAL_READ_REPAIR_CHANCE +- return DCLOCAL_READ_REPAIR_CHANCE; +- case 39: // MEMTABLE_FLUSH_PERIOD_IN_MS +- return MEMTABLE_FLUSH_PERIOD_IN_MS; +- case 40: // DEFAULT_TIME_TO_LIVE +- return DEFAULT_TIME_TO_LIVE; +- case 42: // SPECULATIVE_RETRY +- return SPECULATIVE_RETRY; +- case 43: // TRIGGERS +- return TRIGGERS; +- case 44: // CELLS_PER_ROW_TO_CACHE +- return CELLS_PER_ROW_TO_CACHE; +- case 45: // MIN_INDEX_INTERVAL +- return MIN_INDEX_INTERVAL; +- case 46: // MAX_INDEX_INTERVAL +- return MAX_INDEX_INTERVAL; +- case 9: // ROW_CACHE_SIZE +- return ROW_CACHE_SIZE; +- case 11: // KEY_CACHE_SIZE +- return KEY_CACHE_SIZE; +- case 19: // ROW_CACHE_SAVE_PERIOD_IN_SECONDS +- return ROW_CACHE_SAVE_PERIOD_IN_SECONDS; +- 
case 20: // KEY_CACHE_SAVE_PERIOD_IN_SECONDS +- return KEY_CACHE_SAVE_PERIOD_IN_SECONDS; +- case 21: // MEMTABLE_FLUSH_AFTER_MINS +- return MEMTABLE_FLUSH_AFTER_MINS; +- case 22: // MEMTABLE_THROUGHPUT_IN_MB +- return MEMTABLE_THROUGHPUT_IN_MB; +- case 23: // MEMTABLE_OPERATIONS_IN_MILLIONS +- return MEMTABLE_OPERATIONS_IN_MILLIONS; +- case 24: // REPLICATE_ON_WRITE +- return REPLICATE_ON_WRITE; +- case 25: // MERGE_SHARDS_CHANCE +- return MERGE_SHARDS_CHANCE; +- case 27: // ROW_CACHE_PROVIDER +- return ROW_CACHE_PROVIDER; +- case 31: // ROW_CACHE_KEYS_TO_SAVE +- return ROW_CACHE_KEYS_TO_SAVE; +- case 38: // POPULATE_IO_CACHE_ON_FLUSH +- return POPULATE_IO_CACHE_ON_FLUSH; +- case 41: // INDEX_INTERVAL +- return INDEX_INTERVAL; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private static final int __READ_REPAIR_CHANCE_ISSET_ID = 0; +- private static final int __GC_GRACE_SECONDS_ISSET_ID = 1; +- private static final int __ID_ISSET_ID = 2; +- private static final int __MIN_COMPACTION_THRESHOLD_ISSET_ID = 3; +- private static final int __MAX_COMPACTION_THRESHOLD_ISSET_ID = 4; +- private static final int __BLOOM_FILTER_FP_CHANCE_ISSET_ID = 5; +- private static final int __DCLOCAL_READ_REPAIR_CHANCE_ISSET_ID = 6; +- private static final int __MEMTABLE_FLUSH_PERIOD_IN_MS_ISSET_ID = 7; +- private static final int __DEFAULT_TIME_TO_LIVE_ISSET_ID = 8; +- private static final int __MIN_INDEX_INTERVAL_ISSET_ID = 9; +- private static final int __MAX_INDEX_INTERVAL_ISSET_ID = 10; +- private static final int __ROW_CACHE_SIZE_ISSET_ID = 11; +- private static final int __KEY_CACHE_SIZE_ISSET_ID = 12; +- private static final int __ROW_CACHE_SAVE_PERIOD_IN_SECONDS_ISSET_ID = 13; +- private static final int __KEY_CACHE_SAVE_PERIOD_IN_SECONDS_ISSET_ID = 14; +- private static final int __MEMTABLE_FLUSH_AFTER_MINS_ISSET_ID = 15; +- private static final int __MEMTABLE_THROUGHPUT_IN_MB_ISSET_ID = 16; +- private static final int __MEMTABLE_OPERATIONS_IN_MILLIONS_ISSET_ID = 17; +- private static final int __REPLICATE_ON_WRITE_ISSET_ID = 18; +- private static final int __MERGE_SHARDS_CHANCE_ISSET_ID = 19; +- private static final int __ROW_CACHE_KEYS_TO_SAVE_ISSET_ID = 20; +- private static final int __POPULATE_IO_CACHE_ON_FLUSH_ISSET_ID = 21; +- private static final int __INDEX_INTERVAL_ISSET_ID = 22; +- private int 
__isset_bitfield = 0; +- private _Fields optionals[] = {_Fields.COLUMN_TYPE,_Fields.COMPARATOR_TYPE,_Fields.SUBCOMPARATOR_TYPE,_Fields.COMMENT,_Fields.READ_REPAIR_CHANCE,_Fields.COLUMN_METADATA,_Fields.GC_GRACE_SECONDS,_Fields.DEFAULT_VALIDATION_CLASS,_Fields.ID,_Fields.MIN_COMPACTION_THRESHOLD,_Fields.MAX_COMPACTION_THRESHOLD,_Fields.KEY_VALIDATION_CLASS,_Fields.KEY_ALIAS,_Fields.COMPACTION_STRATEGY,_Fields.COMPACTION_STRATEGY_OPTIONS,_Fields.COMPRESSION_OPTIONS,_Fields.BLOOM_FILTER_FP_CHANCE,_Fields.CACHING,_Fields.DCLOCAL_READ_REPAIR_CHANCE,_Fields.MEMTABLE_FLUSH_PERIOD_IN_MS,_Fields.DEFAULT_TIME_TO_LIVE,_Fields.SPECULATIVE_RETRY,_Fields.TRIGGERS,_Fields.CELLS_PER_ROW_TO_CACHE,_Fields.MIN_INDEX_INTERVAL,_Fields.MAX_INDEX_INTERVAL,_Fields.ROW_CACHE_SIZE,_Fields.KEY_CACHE_SIZE,_Fields.ROW_CACHE_SAVE_PERIOD_IN_SECONDS,_Fields.KEY_CACHE_SAVE_PERIOD_IN_SECONDS,_Fields.MEMTABLE_FLUSH_AFTER_MINS,_Fields.MEMTABLE_THROUGHPUT_IN_MB,_Fields.MEMTABLE_OPERATIONS_IN_MILLIONS,_Fields.REPLICATE_ON_WRITE,_Fields.MERGE_SHARDS_CHANCE,_Fields.ROW_CACHE_PROVIDER,_Fields.ROW_CACHE_KEYS_TO_SAVE,_Fields.POPULATE_IO_CACHE_ON_FLUSH,_Fields.INDEX_INTERVAL}; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.KEYSPACE, new org.apache.thrift.meta_data.FieldMetaData("keyspace", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.COLUMN_TYPE, new org.apache.thrift.meta_data.FieldMetaData("column_type", 
org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.COMPARATOR_TYPE, new org.apache.thrift.meta_data.FieldMetaData("comparator_type", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.SUBCOMPARATOR_TYPE, new org.apache.thrift.meta_data.FieldMetaData("subcomparator_type", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.COMMENT, new org.apache.thrift.meta_data.FieldMetaData("comment", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.READ_REPAIR_CHANCE, new org.apache.thrift.meta_data.FieldMetaData("read_repair_chance", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE))); +- tmpMap.put(_Fields.COLUMN_METADATA, new org.apache.thrift.meta_data.FieldMetaData("column_metadata", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnDef.class)))); +- tmpMap.put(_Fields.GC_GRACE_SECONDS, new org.apache.thrift.meta_data.FieldMetaData("gc_grace_seconds", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- tmpMap.put(_Fields.DEFAULT_VALIDATION_CLASS, new org.apache.thrift.meta_data.FieldMetaData("default_validation_class", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.ID, new org.apache.thrift.meta_data.FieldMetaData("id", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- tmpMap.put(_Fields.MIN_COMPACTION_THRESHOLD, new org.apache.thrift.meta_data.FieldMetaData("min_compaction_threshold", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- tmpMap.put(_Fields.MAX_COMPACTION_THRESHOLD, new org.apache.thrift.meta_data.FieldMetaData("max_compaction_threshold", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- tmpMap.put(_Fields.KEY_VALIDATION_CLASS, new org.apache.thrift.meta_data.FieldMetaData("key_validation_class", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.KEY_ALIAS, new org.apache.thrift.meta_data.FieldMetaData("key_alias", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.COMPACTION_STRATEGY, new org.apache.thrift.meta_data.FieldMetaData("compaction_strategy", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.COMPACTION_STRATEGY_OPTIONS, new org.apache.thrift.meta_data.FieldMetaData("compaction_strategy_options", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), +- new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); +- tmpMap.put(_Fields.COMPRESSION_OPTIONS, new org.apache.thrift.meta_data.FieldMetaData("compression_options", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); +- tmpMap.put(_Fields.BLOOM_FILTER_FP_CHANCE, new org.apache.thrift.meta_data.FieldMetaData("bloom_filter_fp_chance", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE))); +- tmpMap.put(_Fields.CACHING, new org.apache.thrift.meta_data.FieldMetaData("caching", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.DCLOCAL_READ_REPAIR_CHANCE, new org.apache.thrift.meta_data.FieldMetaData("dclocal_read_repair_chance", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE))); +- tmpMap.put(_Fields.MEMTABLE_FLUSH_PERIOD_IN_MS, new org.apache.thrift.meta_data.FieldMetaData("memtable_flush_period_in_ms", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- tmpMap.put(_Fields.DEFAULT_TIME_TO_LIVE, new org.apache.thrift.meta_data.FieldMetaData("default_time_to_live", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- tmpMap.put(_Fields.SPECULATIVE_RETRY, new org.apache.thrift.meta_data.FieldMetaData("speculative_retry", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.TRIGGERS, new org.apache.thrift.meta_data.FieldMetaData("triggers", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TriggerDef.class)))); +- tmpMap.put(_Fields.CELLS_PER_ROW_TO_CACHE, new org.apache.thrift.meta_data.FieldMetaData("cells_per_row_to_cache", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.MIN_INDEX_INTERVAL, new org.apache.thrift.meta_data.FieldMetaData("min_index_interval", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- tmpMap.put(_Fields.MAX_INDEX_INTERVAL, new org.apache.thrift.meta_data.FieldMetaData("max_index_interval", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- tmpMap.put(_Fields.ROW_CACHE_SIZE, new org.apache.thrift.meta_data.FieldMetaData("row_cache_size", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE))); +- tmpMap.put(_Fields.KEY_CACHE_SIZE, new org.apache.thrift.meta_data.FieldMetaData("key_cache_size", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE))); +- tmpMap.put(_Fields.ROW_CACHE_SAVE_PERIOD_IN_SECONDS, new org.apache.thrift.meta_data.FieldMetaData("row_cache_save_period_in_seconds", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- 
tmpMap.put(_Fields.KEY_CACHE_SAVE_PERIOD_IN_SECONDS, new org.apache.thrift.meta_data.FieldMetaData("key_cache_save_period_in_seconds", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- tmpMap.put(_Fields.MEMTABLE_FLUSH_AFTER_MINS, new org.apache.thrift.meta_data.FieldMetaData("memtable_flush_after_mins", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- tmpMap.put(_Fields.MEMTABLE_THROUGHPUT_IN_MB, new org.apache.thrift.meta_data.FieldMetaData("memtable_throughput_in_mb", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- tmpMap.put(_Fields.MEMTABLE_OPERATIONS_IN_MILLIONS, new org.apache.thrift.meta_data.FieldMetaData("memtable_operations_in_millions", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE))); +- tmpMap.put(_Fields.REPLICATE_ON_WRITE, new org.apache.thrift.meta_data.FieldMetaData("replicate_on_write", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); +- tmpMap.put(_Fields.MERGE_SHARDS_CHANCE, new org.apache.thrift.meta_data.FieldMetaData("merge_shards_chance", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE))); +- tmpMap.put(_Fields.ROW_CACHE_PROVIDER, new org.apache.thrift.meta_data.FieldMetaData("row_cache_provider", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.ROW_CACHE_KEYS_TO_SAVE, new org.apache.thrift.meta_data.FieldMetaData("row_cache_keys_to_save", 
org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- tmpMap.put(_Fields.POPULATE_IO_CACHE_ON_FLUSH, new org.apache.thrift.meta_data.FieldMetaData("populate_io_cache_on_flush", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); +- tmpMap.put(_Fields.INDEX_INTERVAL, new org.apache.thrift.meta_data.FieldMetaData("index_interval", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CfDef.class, metaDataMap); +- } +- +- public CfDef() { +- this.column_type = "Standard"; +- +- this.comparator_type = "BytesType"; +- +- this.caching = "keys_only"; +- +- this.dclocal_read_repair_chance = 0; +- +- this.speculative_retry = "NONE"; +- +- this.cells_per_row_to_cache = "100"; +- +- } +- +- public CfDef( +- String keyspace, +- String name) +- { +- this(); +- this.keyspace = keyspace; +- this.name = name; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public CfDef(CfDef other) { +- __isset_bitfield = other.__isset_bitfield; +- if (other.isSetKeyspace()) { +- this.keyspace = other.keyspace; +- } +- if (other.isSetName()) { +- this.name = other.name; +- } +- if (other.isSetColumn_type()) { +- this.column_type = other.column_type; +- } +- if (other.isSetComparator_type()) { +- this.comparator_type = other.comparator_type; +- } +- if (other.isSetSubcomparator_type()) { +- this.subcomparator_type = other.subcomparator_type; +- } +- if (other.isSetComment()) { +- this.comment = other.comment; +- } +- this.read_repair_chance = other.read_repair_chance; +- if (other.isSetColumn_metadata()) { +- List __this__column_metadata = new ArrayList(other.column_metadata.size()); +- for (ColumnDef other_element : other.column_metadata) { +- __this__column_metadata.add(new ColumnDef(other_element)); +- } +- this.column_metadata = __this__column_metadata; +- } +- this.gc_grace_seconds = other.gc_grace_seconds; +- if (other.isSetDefault_validation_class()) { +- this.default_validation_class = other.default_validation_class; +- } +- this.id = other.id; +- this.min_compaction_threshold = other.min_compaction_threshold; +- this.max_compaction_threshold = other.max_compaction_threshold; +- if (other.isSetKey_validation_class()) { +- this.key_validation_class = other.key_validation_class; +- } +- if (other.isSetKey_alias()) { +- this.key_alias = org.apache.thrift.TBaseHelper.copyBinary(other.key_alias); +-; +- } +- if (other.isSetCompaction_strategy()) { +- this.compaction_strategy = other.compaction_strategy; +- } +- if (other.isSetCompaction_strategy_options()) { +- Map __this__compaction_strategy_options = new HashMap(other.compaction_strategy_options); +- this.compaction_strategy_options = __this__compaction_strategy_options; +- } +- if (other.isSetCompression_options()) { +- Map __this__compression_options = new HashMap(other.compression_options); +- this.compression_options = __this__compression_options; +- } +- 
this.bloom_filter_fp_chance = other.bloom_filter_fp_chance; +- if (other.isSetCaching()) { +- this.caching = other.caching; +- } +- this.dclocal_read_repair_chance = other.dclocal_read_repair_chance; +- this.memtable_flush_period_in_ms = other.memtable_flush_period_in_ms; +- this.default_time_to_live = other.default_time_to_live; +- if (other.isSetSpeculative_retry()) { +- this.speculative_retry = other.speculative_retry; +- } +- if (other.isSetTriggers()) { +- List __this__triggers = new ArrayList(other.triggers.size()); +- for (TriggerDef other_element : other.triggers) { +- __this__triggers.add(new TriggerDef(other_element)); +- } +- this.triggers = __this__triggers; +- } +- if (other.isSetCells_per_row_to_cache()) { +- this.cells_per_row_to_cache = other.cells_per_row_to_cache; +- } +- this.min_index_interval = other.min_index_interval; +- this.max_index_interval = other.max_index_interval; +- this.row_cache_size = other.row_cache_size; +- this.key_cache_size = other.key_cache_size; +- this.row_cache_save_period_in_seconds = other.row_cache_save_period_in_seconds; +- this.key_cache_save_period_in_seconds = other.key_cache_save_period_in_seconds; +- this.memtable_flush_after_mins = other.memtable_flush_after_mins; +- this.memtable_throughput_in_mb = other.memtable_throughput_in_mb; +- this.memtable_operations_in_millions = other.memtable_operations_in_millions; +- this.replicate_on_write = other.replicate_on_write; +- this.merge_shards_chance = other.merge_shards_chance; +- if (other.isSetRow_cache_provider()) { +- this.row_cache_provider = other.row_cache_provider; +- } +- this.row_cache_keys_to_save = other.row_cache_keys_to_save; +- this.populate_io_cache_on_flush = other.populate_io_cache_on_flush; +- this.index_interval = other.index_interval; +- } +- +- public CfDef deepCopy() { +- return new CfDef(this); +- } +- +- @Override +- public void clear() { +- this.keyspace = null; +- this.name = null; +- this.column_type = "Standard"; +- +- this.comparator_type 
= "BytesType"; +- +- this.subcomparator_type = null; +- this.comment = null; +- setRead_repair_chanceIsSet(false); +- this.read_repair_chance = 0.0; +- this.column_metadata = null; +- setGc_grace_secondsIsSet(false); +- this.gc_grace_seconds = 0; +- this.default_validation_class = null; +- setIdIsSet(false); +- this.id = 0; +- setMin_compaction_thresholdIsSet(false); +- this.min_compaction_threshold = 0; +- setMax_compaction_thresholdIsSet(false); +- this.max_compaction_threshold = 0; +- this.key_validation_class = null; +- this.key_alias = null; +- this.compaction_strategy = null; +- this.compaction_strategy_options = null; +- this.compression_options = null; +- setBloom_filter_fp_chanceIsSet(false); +- this.bloom_filter_fp_chance = 0.0; +- this.caching = "keys_only"; +- +- this.dclocal_read_repair_chance = 0; +- +- setMemtable_flush_period_in_msIsSet(false); +- this.memtable_flush_period_in_ms = 0; +- setDefault_time_to_liveIsSet(false); +- this.default_time_to_live = 0; +- this.speculative_retry = "NONE"; +- +- this.triggers = null; +- this.cells_per_row_to_cache = "100"; +- +- setMin_index_intervalIsSet(false); +- this.min_index_interval = 0; +- setMax_index_intervalIsSet(false); +- this.max_index_interval = 0; +- setRow_cache_sizeIsSet(false); +- this.row_cache_size = 0.0; +- setKey_cache_sizeIsSet(false); +- this.key_cache_size = 0.0; +- setRow_cache_save_period_in_secondsIsSet(false); +- this.row_cache_save_period_in_seconds = 0; +- setKey_cache_save_period_in_secondsIsSet(false); +- this.key_cache_save_period_in_seconds = 0; +- setMemtable_flush_after_minsIsSet(false); +- this.memtable_flush_after_mins = 0; +- setMemtable_throughput_in_mbIsSet(false); +- this.memtable_throughput_in_mb = 0; +- setMemtable_operations_in_millionsIsSet(false); +- this.memtable_operations_in_millions = 0.0; +- setReplicate_on_writeIsSet(false); +- this.replicate_on_write = false; +- setMerge_shards_chanceIsSet(false); +- this.merge_shards_chance = 0.0; +- this.row_cache_provider 
= null; +- setRow_cache_keys_to_saveIsSet(false); +- this.row_cache_keys_to_save = 0; +- setPopulate_io_cache_on_flushIsSet(false); +- this.populate_io_cache_on_flush = false; +- setIndex_intervalIsSet(false); +- this.index_interval = 0; +- } +- +- public String getKeyspace() { +- return this.keyspace; +- } +- +- public CfDef setKeyspace(String keyspace) { +- this.keyspace = keyspace; +- return this; +- } +- +- public void unsetKeyspace() { +- this.keyspace = null; +- } +- +- /** Returns true if field keyspace is set (has been assigned a value) and false otherwise */ +- public boolean isSetKeyspace() { +- return this.keyspace != null; +- } +- +- public void setKeyspaceIsSet(boolean value) { +- if (!value) { +- this.keyspace = null; +- } +- } +- +- public String getName() { +- return this.name; +- } +- +- public CfDef setName(String name) { +- this.name = name; +- return this; +- } +- +- public void unsetName() { +- this.name = null; +- } +- +- /** Returns true if field name is set (has been assigned a value) and false otherwise */ +- public boolean isSetName() { +- return this.name != null; +- } +- +- public void setNameIsSet(boolean value) { +- if (!value) { +- this.name = null; +- } +- } +- +- public String getColumn_type() { +- return this.column_type; +- } +- +- public CfDef setColumn_type(String column_type) { +- this.column_type = column_type; +- return this; +- } +- +- public void unsetColumn_type() { +- this.column_type = null; +- } +- +- /** Returns true if field column_type is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumn_type() { +- return this.column_type != null; +- } +- +- public void setColumn_typeIsSet(boolean value) { +- if (!value) { +- this.column_type = null; +- } +- } +- +- public String getComparator_type() { +- return this.comparator_type; +- } +- +- public CfDef setComparator_type(String comparator_type) { +- this.comparator_type = comparator_type; +- return this; +- } +- +- public void 
unsetComparator_type() { +- this.comparator_type = null; +- } +- +- /** Returns true if field comparator_type is set (has been assigned a value) and false otherwise */ +- public boolean isSetComparator_type() { +- return this.comparator_type != null; +- } +- +- public void setComparator_typeIsSet(boolean value) { +- if (!value) { +- this.comparator_type = null; +- } +- } +- +- public String getSubcomparator_type() { +- return this.subcomparator_type; +- } +- +- public CfDef setSubcomparator_type(String subcomparator_type) { +- this.subcomparator_type = subcomparator_type; +- return this; +- } +- +- public void unsetSubcomparator_type() { +- this.subcomparator_type = null; +- } +- +- /** Returns true if field subcomparator_type is set (has been assigned a value) and false otherwise */ +- public boolean isSetSubcomparator_type() { +- return this.subcomparator_type != null; +- } +- +- public void setSubcomparator_typeIsSet(boolean value) { +- if (!value) { +- this.subcomparator_type = null; +- } +- } +- +- public String getComment() { +- return this.comment; +- } +- +- public CfDef setComment(String comment) { +- this.comment = comment; +- return this; +- } +- +- public void unsetComment() { +- this.comment = null; +- } +- +- /** Returns true if field comment is set (has been assigned a value) and false otherwise */ +- public boolean isSetComment() { +- return this.comment != null; +- } +- +- public void setCommentIsSet(boolean value) { +- if (!value) { +- this.comment = null; +- } +- } +- +- public double getRead_repair_chance() { +- return this.read_repair_chance; +- } +- +- public CfDef setRead_repair_chance(double read_repair_chance) { +- this.read_repair_chance = read_repair_chance; +- setRead_repair_chanceIsSet(true); +- return this; +- } +- +- public void unsetRead_repair_chance() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __READ_REPAIR_CHANCE_ISSET_ID); +- } +- +- /** Returns true if field read_repair_chance is set (has been assigned a 
value) and false otherwise */ +- public boolean isSetRead_repair_chance() { +- return EncodingUtils.testBit(__isset_bitfield, __READ_REPAIR_CHANCE_ISSET_ID); +- } +- +- public void setRead_repair_chanceIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __READ_REPAIR_CHANCE_ISSET_ID, value); +- } +- +- public int getColumn_metadataSize() { +- return (this.column_metadata == null) ? 0 : this.column_metadata.size(); +- } +- +- public java.util.Iterator getColumn_metadataIterator() { +- return (this.column_metadata == null) ? null : this.column_metadata.iterator(); +- } +- +- public void addToColumn_metadata(ColumnDef elem) { +- if (this.column_metadata == null) { +- this.column_metadata = new ArrayList(); +- } +- this.column_metadata.add(elem); +- } +- +- public List getColumn_metadata() { +- return this.column_metadata; +- } +- +- public CfDef setColumn_metadata(List column_metadata) { +- this.column_metadata = column_metadata; +- return this; +- } +- +- public void unsetColumn_metadata() { +- this.column_metadata = null; +- } +- +- /** Returns true if field column_metadata is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumn_metadata() { +- return this.column_metadata != null; +- } +- +- public void setColumn_metadataIsSet(boolean value) { +- if (!value) { +- this.column_metadata = null; +- } +- } +- +- public int getGc_grace_seconds() { +- return this.gc_grace_seconds; +- } +- +- public CfDef setGc_grace_seconds(int gc_grace_seconds) { +- this.gc_grace_seconds = gc_grace_seconds; +- setGc_grace_secondsIsSet(true); +- return this; +- } +- +- public void unsetGc_grace_seconds() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __GC_GRACE_SECONDS_ISSET_ID); +- } +- +- /** Returns true if field gc_grace_seconds is set (has been assigned a value) and false otherwise */ +- public boolean isSetGc_grace_seconds() { +- return EncodingUtils.testBit(__isset_bitfield, 
__GC_GRACE_SECONDS_ISSET_ID); +- } +- +- public void setGc_grace_secondsIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __GC_GRACE_SECONDS_ISSET_ID, value); +- } +- +- public String getDefault_validation_class() { +- return this.default_validation_class; +- } +- +- public CfDef setDefault_validation_class(String default_validation_class) { +- this.default_validation_class = default_validation_class; +- return this; +- } +- +- public void unsetDefault_validation_class() { +- this.default_validation_class = null; +- } +- +- /** Returns true if field default_validation_class is set (has been assigned a value) and false otherwise */ +- public boolean isSetDefault_validation_class() { +- return this.default_validation_class != null; +- } +- +- public void setDefault_validation_classIsSet(boolean value) { +- if (!value) { +- this.default_validation_class = null; +- } +- } +- +- public int getId() { +- return this.id; +- } +- +- public CfDef setId(int id) { +- this.id = id; +- setIdIsSet(true); +- return this; +- } +- +- public void unsetId() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ID_ISSET_ID); +- } +- +- /** Returns true if field id is set (has been assigned a value) and false otherwise */ +- public boolean isSetId() { +- return EncodingUtils.testBit(__isset_bitfield, __ID_ISSET_ID); +- } +- +- public void setIdIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ID_ISSET_ID, value); +- } +- +- public int getMin_compaction_threshold() { +- return this.min_compaction_threshold; +- } +- +- public CfDef setMin_compaction_threshold(int min_compaction_threshold) { +- this.min_compaction_threshold = min_compaction_threshold; +- setMin_compaction_thresholdIsSet(true); +- return this; +- } +- +- public void unsetMin_compaction_threshold() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MIN_COMPACTION_THRESHOLD_ISSET_ID); +- } +- +- /** Returns true if field 
min_compaction_threshold is set (has been assigned a value) and false otherwise */ +- public boolean isSetMin_compaction_threshold() { +- return EncodingUtils.testBit(__isset_bitfield, __MIN_COMPACTION_THRESHOLD_ISSET_ID); +- } +- +- public void setMin_compaction_thresholdIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MIN_COMPACTION_THRESHOLD_ISSET_ID, value); +- } +- +- public int getMax_compaction_threshold() { +- return this.max_compaction_threshold; +- } +- +- public CfDef setMax_compaction_threshold(int max_compaction_threshold) { +- this.max_compaction_threshold = max_compaction_threshold; +- setMax_compaction_thresholdIsSet(true); +- return this; +- } +- +- public void unsetMax_compaction_threshold() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAX_COMPACTION_THRESHOLD_ISSET_ID); +- } +- +- /** Returns true if field max_compaction_threshold is set (has been assigned a value) and false otherwise */ +- public boolean isSetMax_compaction_threshold() { +- return EncodingUtils.testBit(__isset_bitfield, __MAX_COMPACTION_THRESHOLD_ISSET_ID); +- } +- +- public void setMax_compaction_thresholdIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_COMPACTION_THRESHOLD_ISSET_ID, value); +- } +- +- public String getKey_validation_class() { +- return this.key_validation_class; +- } +- +- public CfDef setKey_validation_class(String key_validation_class) { +- this.key_validation_class = key_validation_class; +- return this; +- } +- +- public void unsetKey_validation_class() { +- this.key_validation_class = null; +- } +- +- /** Returns true if field key_validation_class is set (has been assigned a value) and false otherwise */ +- public boolean isSetKey_validation_class() { +- return this.key_validation_class != null; +- } +- +- public void setKey_validation_classIsSet(boolean value) { +- if (!value) { +- this.key_validation_class = null; +- } +- } +- +- public byte[] 
getKey_alias() { +- setKey_alias(org.apache.thrift.TBaseHelper.rightSize(key_alias)); +- return key_alias == null ? null : key_alias.array(); +- } +- +- public ByteBuffer bufferForKey_alias() { +- return key_alias; +- } +- +- public CfDef setKey_alias(byte[] key_alias) { +- setKey_alias(key_alias == null ? (ByteBuffer)null : ByteBuffer.wrap(key_alias)); +- return this; +- } +- +- public CfDef setKey_alias(ByteBuffer key_alias) { +- this.key_alias = key_alias; +- return this; +- } +- +- public void unsetKey_alias() { +- this.key_alias = null; +- } +- +- /** Returns true if field key_alias is set (has been assigned a value) and false otherwise */ +- public boolean isSetKey_alias() { +- return this.key_alias != null; +- } +- +- public void setKey_aliasIsSet(boolean value) { +- if (!value) { +- this.key_alias = null; +- } +- } +- +- public String getCompaction_strategy() { +- return this.compaction_strategy; +- } +- +- public CfDef setCompaction_strategy(String compaction_strategy) { +- this.compaction_strategy = compaction_strategy; +- return this; +- } +- +- public void unsetCompaction_strategy() { +- this.compaction_strategy = null; +- } +- +- /** Returns true if field compaction_strategy is set (has been assigned a value) and false otherwise */ +- public boolean isSetCompaction_strategy() { +- return this.compaction_strategy != null; +- } +- +- public void setCompaction_strategyIsSet(boolean value) { +- if (!value) { +- this.compaction_strategy = null; +- } +- } +- +- public int getCompaction_strategy_optionsSize() { +- return (this.compaction_strategy_options == null) ? 
0 : this.compaction_strategy_options.size(); +- } +- +- public void putToCompaction_strategy_options(String key, String val) { +- if (this.compaction_strategy_options == null) { +- this.compaction_strategy_options = new HashMap(); +- } +- this.compaction_strategy_options.put(key, val); +- } +- +- public Map getCompaction_strategy_options() { +- return this.compaction_strategy_options; +- } +- +- public CfDef setCompaction_strategy_options(Map compaction_strategy_options) { +- this.compaction_strategy_options = compaction_strategy_options; +- return this; +- } +- +- public void unsetCompaction_strategy_options() { +- this.compaction_strategy_options = null; +- } +- +- /** Returns true if field compaction_strategy_options is set (has been assigned a value) and false otherwise */ +- public boolean isSetCompaction_strategy_options() { +- return this.compaction_strategy_options != null; +- } +- +- public void setCompaction_strategy_optionsIsSet(boolean value) { +- if (!value) { +- this.compaction_strategy_options = null; +- } +- } +- +- public int getCompression_optionsSize() { +- return (this.compression_options == null) ? 
0 : this.compression_options.size(); +- } +- +- public void putToCompression_options(String key, String val) { +- if (this.compression_options == null) { +- this.compression_options = new HashMap(); +- } +- this.compression_options.put(key, val); +- } +- +- public Map getCompression_options() { +- return this.compression_options; +- } +- +- public CfDef setCompression_options(Map compression_options) { +- this.compression_options = compression_options; +- return this; +- } +- +- public void unsetCompression_options() { +- this.compression_options = null; +- } +- +- /** Returns true if field compression_options is set (has been assigned a value) and false otherwise */ +- public boolean isSetCompression_options() { +- return this.compression_options != null; +- } +- +- public void setCompression_optionsIsSet(boolean value) { +- if (!value) { +- this.compression_options = null; +- } +- } +- +- public double getBloom_filter_fp_chance() { +- return this.bloom_filter_fp_chance; +- } +- +- public CfDef setBloom_filter_fp_chance(double bloom_filter_fp_chance) { +- this.bloom_filter_fp_chance = bloom_filter_fp_chance; +- setBloom_filter_fp_chanceIsSet(true); +- return this; +- } +- +- public void unsetBloom_filter_fp_chance() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __BLOOM_FILTER_FP_CHANCE_ISSET_ID); +- } +- +- /** Returns true if field bloom_filter_fp_chance is set (has been assigned a value) and false otherwise */ +- public boolean isSetBloom_filter_fp_chance() { +- return EncodingUtils.testBit(__isset_bitfield, __BLOOM_FILTER_FP_CHANCE_ISSET_ID); +- } +- +- public void setBloom_filter_fp_chanceIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __BLOOM_FILTER_FP_CHANCE_ISSET_ID, value); +- } +- +- public String getCaching() { +- return this.caching; +- } +- +- public CfDef setCaching(String caching) { +- this.caching = caching; +- return this; +- } +- +- public void unsetCaching() { +- this.caching = null; +- } +- 
+- /** Returns true if field caching is set (has been assigned a value) and false otherwise */ +- public boolean isSetCaching() { +- return this.caching != null; +- } +- +- public void setCachingIsSet(boolean value) { +- if (!value) { +- this.caching = null; +- } +- } +- +- public double getDclocal_read_repair_chance() { +- return this.dclocal_read_repair_chance; +- } +- +- public CfDef setDclocal_read_repair_chance(double dclocal_read_repair_chance) { +- this.dclocal_read_repair_chance = dclocal_read_repair_chance; +- setDclocal_read_repair_chanceIsSet(true); +- return this; +- } +- +- public void unsetDclocal_read_repair_chance() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __DCLOCAL_READ_REPAIR_CHANCE_ISSET_ID); +- } +- +- /** Returns true if field dclocal_read_repair_chance is set (has been assigned a value) and false otherwise */ +- public boolean isSetDclocal_read_repair_chance() { +- return EncodingUtils.testBit(__isset_bitfield, __DCLOCAL_READ_REPAIR_CHANCE_ISSET_ID); +- } +- +- public void setDclocal_read_repair_chanceIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __DCLOCAL_READ_REPAIR_CHANCE_ISSET_ID, value); +- } +- +- public int getMemtable_flush_period_in_ms() { +- return this.memtable_flush_period_in_ms; +- } +- +- public CfDef setMemtable_flush_period_in_ms(int memtable_flush_period_in_ms) { +- this.memtable_flush_period_in_ms = memtable_flush_period_in_ms; +- setMemtable_flush_period_in_msIsSet(true); +- return this; +- } +- +- public void unsetMemtable_flush_period_in_ms() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MEMTABLE_FLUSH_PERIOD_IN_MS_ISSET_ID); +- } +- +- /** Returns true if field memtable_flush_period_in_ms is set (has been assigned a value) and false otherwise */ +- public boolean isSetMemtable_flush_period_in_ms() { +- return EncodingUtils.testBit(__isset_bitfield, __MEMTABLE_FLUSH_PERIOD_IN_MS_ISSET_ID); +- } +- +- public void 
setMemtable_flush_period_in_msIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MEMTABLE_FLUSH_PERIOD_IN_MS_ISSET_ID, value); +- } +- +- public int getDefault_time_to_live() { +- return this.default_time_to_live; +- } +- +- public CfDef setDefault_time_to_live(int default_time_to_live) { +- this.default_time_to_live = default_time_to_live; +- setDefault_time_to_liveIsSet(true); +- return this; +- } +- +- public void unsetDefault_time_to_live() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __DEFAULT_TIME_TO_LIVE_ISSET_ID); +- } +- +- /** Returns true if field default_time_to_live is set (has been assigned a value) and false otherwise */ +- public boolean isSetDefault_time_to_live() { +- return EncodingUtils.testBit(__isset_bitfield, __DEFAULT_TIME_TO_LIVE_ISSET_ID); +- } +- +- public void setDefault_time_to_liveIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __DEFAULT_TIME_TO_LIVE_ISSET_ID, value); +- } +- +- public String getSpeculative_retry() { +- return this.speculative_retry; +- } +- +- public CfDef setSpeculative_retry(String speculative_retry) { +- this.speculative_retry = speculative_retry; +- return this; +- } +- +- public void unsetSpeculative_retry() { +- this.speculative_retry = null; +- } +- +- /** Returns true if field speculative_retry is set (has been assigned a value) and false otherwise */ +- public boolean isSetSpeculative_retry() { +- return this.speculative_retry != null; +- } +- +- public void setSpeculative_retryIsSet(boolean value) { +- if (!value) { +- this.speculative_retry = null; +- } +- } +- +- public int getTriggersSize() { +- return (this.triggers == null) ? 0 : this.triggers.size(); +- } +- +- public java.util.Iterator getTriggersIterator() { +- return (this.triggers == null) ? 
null : this.triggers.iterator(); +- } +- +- public void addToTriggers(TriggerDef elem) { +- if (this.triggers == null) { +- this.triggers = new ArrayList(); +- } +- this.triggers.add(elem); +- } +- +- public List getTriggers() { +- return this.triggers; +- } +- +- public CfDef setTriggers(List triggers) { +- this.triggers = triggers; +- return this; +- } +- +- public void unsetTriggers() { +- this.triggers = null; +- } +- +- /** Returns true if field triggers is set (has been assigned a value) and false otherwise */ +- public boolean isSetTriggers() { +- return this.triggers != null; +- } +- +- public void setTriggersIsSet(boolean value) { +- if (!value) { +- this.triggers = null; +- } +- } +- +- public String getCells_per_row_to_cache() { +- return this.cells_per_row_to_cache; +- } +- +- public CfDef setCells_per_row_to_cache(String cells_per_row_to_cache) { +- this.cells_per_row_to_cache = cells_per_row_to_cache; +- return this; +- } +- +- public void unsetCells_per_row_to_cache() { +- this.cells_per_row_to_cache = null; +- } +- +- /** Returns true if field cells_per_row_to_cache is set (has been assigned a value) and false otherwise */ +- public boolean isSetCells_per_row_to_cache() { +- return this.cells_per_row_to_cache != null; +- } +- +- public void setCells_per_row_to_cacheIsSet(boolean value) { +- if (!value) { +- this.cells_per_row_to_cache = null; +- } +- } +- +- public int getMin_index_interval() { +- return this.min_index_interval; +- } +- +- public CfDef setMin_index_interval(int min_index_interval) { +- this.min_index_interval = min_index_interval; +- setMin_index_intervalIsSet(true); +- return this; +- } +- +- public void unsetMin_index_interval() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MIN_INDEX_INTERVAL_ISSET_ID); +- } +- +- /** Returns true if field min_index_interval is set (has been assigned a value) and false otherwise */ +- public boolean isSetMin_index_interval() { +- return EncodingUtils.testBit(__isset_bitfield, 
__MIN_INDEX_INTERVAL_ISSET_ID); +- } +- +- public void setMin_index_intervalIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MIN_INDEX_INTERVAL_ISSET_ID, value); +- } +- +- public int getMax_index_interval() { +- return this.max_index_interval; +- } +- +- public CfDef setMax_index_interval(int max_index_interval) { +- this.max_index_interval = max_index_interval; +- setMax_index_intervalIsSet(true); +- return this; +- } +- +- public void unsetMax_index_interval() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAX_INDEX_INTERVAL_ISSET_ID); +- } +- +- /** Returns true if field max_index_interval is set (has been assigned a value) and false otherwise */ +- public boolean isSetMax_index_interval() { +- return EncodingUtils.testBit(__isset_bitfield, __MAX_INDEX_INTERVAL_ISSET_ID); +- } +- +- public void setMax_index_intervalIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_INDEX_INTERVAL_ISSET_ID, value); +- } +- +- /** +- * @deprecated +- */ +- public double getRow_cache_size() { +- return this.row_cache_size; +- } +- +- /** +- * @deprecated +- */ +- public CfDef setRow_cache_size(double row_cache_size) { +- this.row_cache_size = row_cache_size; +- setRow_cache_sizeIsSet(true); +- return this; +- } +- +- public void unsetRow_cache_size() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ROW_CACHE_SIZE_ISSET_ID); +- } +- +- /** Returns true if field row_cache_size is set (has been assigned a value) and false otherwise */ +- public boolean isSetRow_cache_size() { +- return EncodingUtils.testBit(__isset_bitfield, __ROW_CACHE_SIZE_ISSET_ID); +- } +- +- public void setRow_cache_sizeIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ROW_CACHE_SIZE_ISSET_ID, value); +- } +- +- /** +- * @deprecated +- */ +- public double getKey_cache_size() { +- return this.key_cache_size; +- } +- +- /** +- * @deprecated +- */ +- public CfDef 
setKey_cache_size(double key_cache_size) { +- this.key_cache_size = key_cache_size; +- setKey_cache_sizeIsSet(true); +- return this; +- } +- +- public void unsetKey_cache_size() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __KEY_CACHE_SIZE_ISSET_ID); +- } +- +- /** Returns true if field key_cache_size is set (has been assigned a value) and false otherwise */ +- public boolean isSetKey_cache_size() { +- return EncodingUtils.testBit(__isset_bitfield, __KEY_CACHE_SIZE_ISSET_ID); +- } +- +- public void setKey_cache_sizeIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __KEY_CACHE_SIZE_ISSET_ID, value); +- } +- +- /** +- * @deprecated +- */ +- public int getRow_cache_save_period_in_seconds() { +- return this.row_cache_save_period_in_seconds; +- } +- +- /** +- * @deprecated +- */ +- public CfDef setRow_cache_save_period_in_seconds(int row_cache_save_period_in_seconds) { +- this.row_cache_save_period_in_seconds = row_cache_save_period_in_seconds; +- setRow_cache_save_period_in_secondsIsSet(true); +- return this; +- } +- +- public void unsetRow_cache_save_period_in_seconds() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ROW_CACHE_SAVE_PERIOD_IN_SECONDS_ISSET_ID); +- } +- +- /** Returns true if field row_cache_save_period_in_seconds is set (has been assigned a value) and false otherwise */ +- public boolean isSetRow_cache_save_period_in_seconds() { +- return EncodingUtils.testBit(__isset_bitfield, __ROW_CACHE_SAVE_PERIOD_IN_SECONDS_ISSET_ID); +- } +- +- public void setRow_cache_save_period_in_secondsIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ROW_CACHE_SAVE_PERIOD_IN_SECONDS_ISSET_ID, value); +- } +- +- /** +- * @deprecated +- */ +- public int getKey_cache_save_period_in_seconds() { +- return this.key_cache_save_period_in_seconds; +- } +- +- /** +- * @deprecated +- */ +- public CfDef setKey_cache_save_period_in_seconds(int key_cache_save_period_in_seconds) { 
+- this.key_cache_save_period_in_seconds = key_cache_save_period_in_seconds; +- setKey_cache_save_period_in_secondsIsSet(true); +- return this; +- } +- +- public void unsetKey_cache_save_period_in_seconds() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __KEY_CACHE_SAVE_PERIOD_IN_SECONDS_ISSET_ID); +- } +- +- /** Returns true if field key_cache_save_period_in_seconds is set (has been assigned a value) and false otherwise */ +- public boolean isSetKey_cache_save_period_in_seconds() { +- return EncodingUtils.testBit(__isset_bitfield, __KEY_CACHE_SAVE_PERIOD_IN_SECONDS_ISSET_ID); +- } +- +- public void setKey_cache_save_period_in_secondsIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __KEY_CACHE_SAVE_PERIOD_IN_SECONDS_ISSET_ID, value); +- } +- +- /** +- * @deprecated +- */ +- public int getMemtable_flush_after_mins() { +- return this.memtable_flush_after_mins; +- } +- +- /** +- * @deprecated +- */ +- public CfDef setMemtable_flush_after_mins(int memtable_flush_after_mins) { +- this.memtable_flush_after_mins = memtable_flush_after_mins; +- setMemtable_flush_after_minsIsSet(true); +- return this; +- } +- +- public void unsetMemtable_flush_after_mins() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MEMTABLE_FLUSH_AFTER_MINS_ISSET_ID); +- } +- +- /** Returns true if field memtable_flush_after_mins is set (has been assigned a value) and false otherwise */ +- public boolean isSetMemtable_flush_after_mins() { +- return EncodingUtils.testBit(__isset_bitfield, __MEMTABLE_FLUSH_AFTER_MINS_ISSET_ID); +- } +- +- public void setMemtable_flush_after_minsIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MEMTABLE_FLUSH_AFTER_MINS_ISSET_ID, value); +- } +- +- /** +- * @deprecated +- */ +- public int getMemtable_throughput_in_mb() { +- return this.memtable_throughput_in_mb; +- } +- +- /** +- * @deprecated +- */ +- public CfDef setMemtable_throughput_in_mb(int 
memtable_throughput_in_mb) { +- this.memtable_throughput_in_mb = memtable_throughput_in_mb; +- setMemtable_throughput_in_mbIsSet(true); +- return this; +- } +- +- public void unsetMemtable_throughput_in_mb() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MEMTABLE_THROUGHPUT_IN_MB_ISSET_ID); +- } +- +- /** Returns true if field memtable_throughput_in_mb is set (has been assigned a value) and false otherwise */ +- public boolean isSetMemtable_throughput_in_mb() { +- return EncodingUtils.testBit(__isset_bitfield, __MEMTABLE_THROUGHPUT_IN_MB_ISSET_ID); +- } +- +- public void setMemtable_throughput_in_mbIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MEMTABLE_THROUGHPUT_IN_MB_ISSET_ID, value); +- } +- +- /** +- * @deprecated +- */ +- public double getMemtable_operations_in_millions() { +- return this.memtable_operations_in_millions; +- } +- +- /** +- * @deprecated +- */ +- public CfDef setMemtable_operations_in_millions(double memtable_operations_in_millions) { +- this.memtable_operations_in_millions = memtable_operations_in_millions; +- setMemtable_operations_in_millionsIsSet(true); +- return this; +- } +- +- public void unsetMemtable_operations_in_millions() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MEMTABLE_OPERATIONS_IN_MILLIONS_ISSET_ID); +- } +- +- /** Returns true if field memtable_operations_in_millions is set (has been assigned a value) and false otherwise */ +- public boolean isSetMemtable_operations_in_millions() { +- return EncodingUtils.testBit(__isset_bitfield, __MEMTABLE_OPERATIONS_IN_MILLIONS_ISSET_ID); +- } +- +- public void setMemtable_operations_in_millionsIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MEMTABLE_OPERATIONS_IN_MILLIONS_ISSET_ID, value); +- } +- +- /** +- * @deprecated +- */ +- public boolean isReplicate_on_write() { +- return this.replicate_on_write; +- } +- +- /** +- * @deprecated +- */ +- public CfDef 
setReplicate_on_write(boolean replicate_on_write) { +- this.replicate_on_write = replicate_on_write; +- setReplicate_on_writeIsSet(true); +- return this; +- } +- +- public void unsetReplicate_on_write() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __REPLICATE_ON_WRITE_ISSET_ID); +- } +- +- /** Returns true if field replicate_on_write is set (has been assigned a value) and false otherwise */ +- public boolean isSetReplicate_on_write() { +- return EncodingUtils.testBit(__isset_bitfield, __REPLICATE_ON_WRITE_ISSET_ID); +- } +- +- public void setReplicate_on_writeIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __REPLICATE_ON_WRITE_ISSET_ID, value); +- } +- +- /** +- * @deprecated +- */ +- public double getMerge_shards_chance() { +- return this.merge_shards_chance; +- } +- +- /** +- * @deprecated +- */ +- public CfDef setMerge_shards_chance(double merge_shards_chance) { +- this.merge_shards_chance = merge_shards_chance; +- setMerge_shards_chanceIsSet(true); +- return this; +- } +- +- public void unsetMerge_shards_chance() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MERGE_SHARDS_CHANCE_ISSET_ID); +- } +- +- /** Returns true if field merge_shards_chance is set (has been assigned a value) and false otherwise */ +- public boolean isSetMerge_shards_chance() { +- return EncodingUtils.testBit(__isset_bitfield, __MERGE_SHARDS_CHANCE_ISSET_ID); +- } +- +- public void setMerge_shards_chanceIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MERGE_SHARDS_CHANCE_ISSET_ID, value); +- } +- +- /** +- * @deprecated +- */ +- public String getRow_cache_provider() { +- return this.row_cache_provider; +- } +- +- /** +- * @deprecated +- */ +- public CfDef setRow_cache_provider(String row_cache_provider) { +- this.row_cache_provider = row_cache_provider; +- return this; +- } +- +- public void unsetRow_cache_provider() { +- this.row_cache_provider = null; +- } +- +- /** Returns 
true if field row_cache_provider is set (has been assigned a value) and false otherwise */ +- public boolean isSetRow_cache_provider() { +- return this.row_cache_provider != null; +- } +- +- public void setRow_cache_providerIsSet(boolean value) { +- if (!value) { +- this.row_cache_provider = null; +- } +- } +- +- /** +- * @deprecated +- */ +- public int getRow_cache_keys_to_save() { +- return this.row_cache_keys_to_save; +- } +- +- /** +- * @deprecated +- */ +- public CfDef setRow_cache_keys_to_save(int row_cache_keys_to_save) { +- this.row_cache_keys_to_save = row_cache_keys_to_save; +- setRow_cache_keys_to_saveIsSet(true); +- return this; +- } +- +- public void unsetRow_cache_keys_to_save() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ROW_CACHE_KEYS_TO_SAVE_ISSET_ID); +- } +- +- /** Returns true if field row_cache_keys_to_save is set (has been assigned a value) and false otherwise */ +- public boolean isSetRow_cache_keys_to_save() { +- return EncodingUtils.testBit(__isset_bitfield, __ROW_CACHE_KEYS_TO_SAVE_ISSET_ID); +- } +- +- public void setRow_cache_keys_to_saveIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ROW_CACHE_KEYS_TO_SAVE_ISSET_ID, value); +- } +- +- /** +- * @deprecated +- */ +- public boolean isPopulate_io_cache_on_flush() { +- return this.populate_io_cache_on_flush; +- } +- +- /** +- * @deprecated +- */ +- public CfDef setPopulate_io_cache_on_flush(boolean populate_io_cache_on_flush) { +- this.populate_io_cache_on_flush = populate_io_cache_on_flush; +- setPopulate_io_cache_on_flushIsSet(true); +- return this; +- } +- +- public void unsetPopulate_io_cache_on_flush() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __POPULATE_IO_CACHE_ON_FLUSH_ISSET_ID); +- } +- +- /** Returns true if field populate_io_cache_on_flush is set (has been assigned a value) and false otherwise */ +- public boolean isSetPopulate_io_cache_on_flush() { +- return EncodingUtils.testBit(__isset_bitfield, 
__POPULATE_IO_CACHE_ON_FLUSH_ISSET_ID); +- } +- +- public void setPopulate_io_cache_on_flushIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __POPULATE_IO_CACHE_ON_FLUSH_ISSET_ID, value); +- } +- +- /** +- * @deprecated +- */ +- public int getIndex_interval() { +- return this.index_interval; +- } +- +- /** +- * @deprecated +- */ +- public CfDef setIndex_interval(int index_interval) { +- this.index_interval = index_interval; +- setIndex_intervalIsSet(true); +- return this; +- } +- +- public void unsetIndex_interval() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __INDEX_INTERVAL_ISSET_ID); +- } +- +- /** Returns true if field index_interval is set (has been assigned a value) and false otherwise */ +- public boolean isSetIndex_interval() { +- return EncodingUtils.testBit(__isset_bitfield, __INDEX_INTERVAL_ISSET_ID); +- } +- +- public void setIndex_intervalIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __INDEX_INTERVAL_ISSET_ID, value); +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case KEYSPACE: +- if (value == null) { +- unsetKeyspace(); +- } else { +- setKeyspace((String)value); +- } +- break; +- +- case NAME: +- if (value == null) { +- unsetName(); +- } else { +- setName((String)value); +- } +- break; +- +- case COLUMN_TYPE: +- if (value == null) { +- unsetColumn_type(); +- } else { +- setColumn_type((String)value); +- } +- break; +- +- case COMPARATOR_TYPE: +- if (value == null) { +- unsetComparator_type(); +- } else { +- setComparator_type((String)value); +- } +- break; +- +- case SUBCOMPARATOR_TYPE: +- if (value == null) { +- unsetSubcomparator_type(); +- } else { +- setSubcomparator_type((String)value); +- } +- break; +- +- case COMMENT: +- if (value == null) { +- unsetComment(); +- } else { +- setComment((String)value); +- } +- break; +- +- case READ_REPAIR_CHANCE: +- if (value == null) { +- unsetRead_repair_chance(); +- } 
else { +- setRead_repair_chance((Double)value); +- } +- break; +- +- case COLUMN_METADATA: +- if (value == null) { +- unsetColumn_metadata(); +- } else { +- setColumn_metadata((List)value); +- } +- break; +- +- case GC_GRACE_SECONDS: +- if (value == null) { +- unsetGc_grace_seconds(); +- } else { +- setGc_grace_seconds((Integer)value); +- } +- break; +- +- case DEFAULT_VALIDATION_CLASS: +- if (value == null) { +- unsetDefault_validation_class(); +- } else { +- setDefault_validation_class((String)value); +- } +- break; +- +- case ID: +- if (value == null) { +- unsetId(); +- } else { +- setId((Integer)value); +- } +- break; +- +- case MIN_COMPACTION_THRESHOLD: +- if (value == null) { +- unsetMin_compaction_threshold(); +- } else { +- setMin_compaction_threshold((Integer)value); +- } +- break; +- +- case MAX_COMPACTION_THRESHOLD: +- if (value == null) { +- unsetMax_compaction_threshold(); +- } else { +- setMax_compaction_threshold((Integer)value); +- } +- break; +- +- case KEY_VALIDATION_CLASS: +- if (value == null) { +- unsetKey_validation_class(); +- } else { +- setKey_validation_class((String)value); +- } +- break; +- +- case KEY_ALIAS: +- if (value == null) { +- unsetKey_alias(); +- } else { +- setKey_alias((ByteBuffer)value); +- } +- break; +- +- case COMPACTION_STRATEGY: +- if (value == null) { +- unsetCompaction_strategy(); +- } else { +- setCompaction_strategy((String)value); +- } +- break; +- +- case COMPACTION_STRATEGY_OPTIONS: +- if (value == null) { +- unsetCompaction_strategy_options(); +- } else { +- setCompaction_strategy_options((Map)value); +- } +- break; +- +- case COMPRESSION_OPTIONS: +- if (value == null) { +- unsetCompression_options(); +- } else { +- setCompression_options((Map)value); +- } +- break; +- +- case BLOOM_FILTER_FP_CHANCE: +- if (value == null) { +- unsetBloom_filter_fp_chance(); +- } else { +- setBloom_filter_fp_chance((Double)value); +- } +- break; +- +- case CACHING: +- if (value == null) { +- unsetCaching(); +- } else { +- 
setCaching((String)value); +- } +- break; +- +- case DCLOCAL_READ_REPAIR_CHANCE: +- if (value == null) { +- unsetDclocal_read_repair_chance(); +- } else { +- setDclocal_read_repair_chance((Double)value); +- } +- break; +- +- case MEMTABLE_FLUSH_PERIOD_IN_MS: +- if (value == null) { +- unsetMemtable_flush_period_in_ms(); +- } else { +- setMemtable_flush_period_in_ms((Integer)value); +- } +- break; +- +- case DEFAULT_TIME_TO_LIVE: +- if (value == null) { +- unsetDefault_time_to_live(); +- } else { +- setDefault_time_to_live((Integer)value); +- } +- break; +- +- case SPECULATIVE_RETRY: +- if (value == null) { +- unsetSpeculative_retry(); +- } else { +- setSpeculative_retry((String)value); +- } +- break; +- +- case TRIGGERS: +- if (value == null) { +- unsetTriggers(); +- } else { +- setTriggers((List)value); +- } +- break; +- +- case CELLS_PER_ROW_TO_CACHE: +- if (value == null) { +- unsetCells_per_row_to_cache(); +- } else { +- setCells_per_row_to_cache((String)value); +- } +- break; +- +- case MIN_INDEX_INTERVAL: +- if (value == null) { +- unsetMin_index_interval(); +- } else { +- setMin_index_interval((Integer)value); +- } +- break; +- +- case MAX_INDEX_INTERVAL: +- if (value == null) { +- unsetMax_index_interval(); +- } else { +- setMax_index_interval((Integer)value); +- } +- break; +- +- case ROW_CACHE_SIZE: +- if (value == null) { +- unsetRow_cache_size(); +- } else { +- setRow_cache_size((Double)value); +- } +- break; +- +- case KEY_CACHE_SIZE: +- if (value == null) { +- unsetKey_cache_size(); +- } else { +- setKey_cache_size((Double)value); +- } +- break; +- +- case ROW_CACHE_SAVE_PERIOD_IN_SECONDS: +- if (value == null) { +- unsetRow_cache_save_period_in_seconds(); +- } else { +- setRow_cache_save_period_in_seconds((Integer)value); +- } +- break; +- +- case KEY_CACHE_SAVE_PERIOD_IN_SECONDS: +- if (value == null) { +- unsetKey_cache_save_period_in_seconds(); +- } else { +- setKey_cache_save_period_in_seconds((Integer)value); +- } +- break; +- +- case 
MEMTABLE_FLUSH_AFTER_MINS: +- if (value == null) { +- unsetMemtable_flush_after_mins(); +- } else { +- setMemtable_flush_after_mins((Integer)value); +- } +- break; +- +- case MEMTABLE_THROUGHPUT_IN_MB: +- if (value == null) { +- unsetMemtable_throughput_in_mb(); +- } else { +- setMemtable_throughput_in_mb((Integer)value); +- } +- break; +- +- case MEMTABLE_OPERATIONS_IN_MILLIONS: +- if (value == null) { +- unsetMemtable_operations_in_millions(); +- } else { +- setMemtable_operations_in_millions((Double)value); +- } +- break; +- +- case REPLICATE_ON_WRITE: +- if (value == null) { +- unsetReplicate_on_write(); +- } else { +- setReplicate_on_write((Boolean)value); +- } +- break; +- +- case MERGE_SHARDS_CHANCE: +- if (value == null) { +- unsetMerge_shards_chance(); +- } else { +- setMerge_shards_chance((Double)value); +- } +- break; +- +- case ROW_CACHE_PROVIDER: +- if (value == null) { +- unsetRow_cache_provider(); +- } else { +- setRow_cache_provider((String)value); +- } +- break; +- +- case ROW_CACHE_KEYS_TO_SAVE: +- if (value == null) { +- unsetRow_cache_keys_to_save(); +- } else { +- setRow_cache_keys_to_save((Integer)value); +- } +- break; +- +- case POPULATE_IO_CACHE_ON_FLUSH: +- if (value == null) { +- unsetPopulate_io_cache_on_flush(); +- } else { +- setPopulate_io_cache_on_flush((Boolean)value); +- } +- break; +- +- case INDEX_INTERVAL: +- if (value == null) { +- unsetIndex_interval(); +- } else { +- setIndex_interval((Integer)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case KEYSPACE: +- return getKeyspace(); +- +- case NAME: +- return getName(); +- +- case COLUMN_TYPE: +- return getColumn_type(); +- +- case COMPARATOR_TYPE: +- return getComparator_type(); +- +- case SUBCOMPARATOR_TYPE: +- return getSubcomparator_type(); +- +- case COMMENT: +- return getComment(); +- +- case READ_REPAIR_CHANCE: +- return Double.valueOf(getRead_repair_chance()); +- +- case COLUMN_METADATA: +- return 
getColumn_metadata(); +- +- case GC_GRACE_SECONDS: +- return Integer.valueOf(getGc_grace_seconds()); +- +- case DEFAULT_VALIDATION_CLASS: +- return getDefault_validation_class(); +- +- case ID: +- return Integer.valueOf(getId()); +- +- case MIN_COMPACTION_THRESHOLD: +- return Integer.valueOf(getMin_compaction_threshold()); +- +- case MAX_COMPACTION_THRESHOLD: +- return Integer.valueOf(getMax_compaction_threshold()); +- +- case KEY_VALIDATION_CLASS: +- return getKey_validation_class(); +- +- case KEY_ALIAS: +- return getKey_alias(); +- +- case COMPACTION_STRATEGY: +- return getCompaction_strategy(); +- +- case COMPACTION_STRATEGY_OPTIONS: +- return getCompaction_strategy_options(); +- +- case COMPRESSION_OPTIONS: +- return getCompression_options(); +- +- case BLOOM_FILTER_FP_CHANCE: +- return Double.valueOf(getBloom_filter_fp_chance()); +- +- case CACHING: +- return getCaching(); +- +- case DCLOCAL_READ_REPAIR_CHANCE: +- return Double.valueOf(getDclocal_read_repair_chance()); +- +- case MEMTABLE_FLUSH_PERIOD_IN_MS: +- return Integer.valueOf(getMemtable_flush_period_in_ms()); +- +- case DEFAULT_TIME_TO_LIVE: +- return Integer.valueOf(getDefault_time_to_live()); +- +- case SPECULATIVE_RETRY: +- return getSpeculative_retry(); +- +- case TRIGGERS: +- return getTriggers(); +- +- case CELLS_PER_ROW_TO_CACHE: +- return getCells_per_row_to_cache(); +- +- case MIN_INDEX_INTERVAL: +- return Integer.valueOf(getMin_index_interval()); +- +- case MAX_INDEX_INTERVAL: +- return Integer.valueOf(getMax_index_interval()); +- +- case ROW_CACHE_SIZE: +- return Double.valueOf(getRow_cache_size()); +- +- case KEY_CACHE_SIZE: +- return Double.valueOf(getKey_cache_size()); +- +- case ROW_CACHE_SAVE_PERIOD_IN_SECONDS: +- return Integer.valueOf(getRow_cache_save_period_in_seconds()); +- +- case KEY_CACHE_SAVE_PERIOD_IN_SECONDS: +- return Integer.valueOf(getKey_cache_save_period_in_seconds()); +- +- case MEMTABLE_FLUSH_AFTER_MINS: +- return Integer.valueOf(getMemtable_flush_after_mins()); +- 
+- case MEMTABLE_THROUGHPUT_IN_MB: +- return Integer.valueOf(getMemtable_throughput_in_mb()); +- +- case MEMTABLE_OPERATIONS_IN_MILLIONS: +- return Double.valueOf(getMemtable_operations_in_millions()); +- +- case REPLICATE_ON_WRITE: +- return Boolean.valueOf(isReplicate_on_write()); +- +- case MERGE_SHARDS_CHANCE: +- return Double.valueOf(getMerge_shards_chance()); +- +- case ROW_CACHE_PROVIDER: +- return getRow_cache_provider(); +- +- case ROW_CACHE_KEYS_TO_SAVE: +- return Integer.valueOf(getRow_cache_keys_to_save()); +- +- case POPULATE_IO_CACHE_ON_FLUSH: +- return Boolean.valueOf(isPopulate_io_cache_on_flush()); +- +- case INDEX_INTERVAL: +- return Integer.valueOf(getIndex_interval()); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case KEYSPACE: +- return isSetKeyspace(); +- case NAME: +- return isSetName(); +- case COLUMN_TYPE: +- return isSetColumn_type(); +- case COMPARATOR_TYPE: +- return isSetComparator_type(); +- case SUBCOMPARATOR_TYPE: +- return isSetSubcomparator_type(); +- case COMMENT: +- return isSetComment(); +- case READ_REPAIR_CHANCE: +- return isSetRead_repair_chance(); +- case COLUMN_METADATA: +- return isSetColumn_metadata(); +- case GC_GRACE_SECONDS: +- return isSetGc_grace_seconds(); +- case DEFAULT_VALIDATION_CLASS: +- return isSetDefault_validation_class(); +- case ID: +- return isSetId(); +- case MIN_COMPACTION_THRESHOLD: +- return isSetMin_compaction_threshold(); +- case MAX_COMPACTION_THRESHOLD: +- return isSetMax_compaction_threshold(); +- case KEY_VALIDATION_CLASS: +- return isSetKey_validation_class(); +- case KEY_ALIAS: +- return isSetKey_alias(); +- case COMPACTION_STRATEGY: +- return isSetCompaction_strategy(); +- case COMPACTION_STRATEGY_OPTIONS: +- return 
isSetCompaction_strategy_options(); +- case COMPRESSION_OPTIONS: +- return isSetCompression_options(); +- case BLOOM_FILTER_FP_CHANCE: +- return isSetBloom_filter_fp_chance(); +- case CACHING: +- return isSetCaching(); +- case DCLOCAL_READ_REPAIR_CHANCE: +- return isSetDclocal_read_repair_chance(); +- case MEMTABLE_FLUSH_PERIOD_IN_MS: +- return isSetMemtable_flush_period_in_ms(); +- case DEFAULT_TIME_TO_LIVE: +- return isSetDefault_time_to_live(); +- case SPECULATIVE_RETRY: +- return isSetSpeculative_retry(); +- case TRIGGERS: +- return isSetTriggers(); +- case CELLS_PER_ROW_TO_CACHE: +- return isSetCells_per_row_to_cache(); +- case MIN_INDEX_INTERVAL: +- return isSetMin_index_interval(); +- case MAX_INDEX_INTERVAL: +- return isSetMax_index_interval(); +- case ROW_CACHE_SIZE: +- return isSetRow_cache_size(); +- case KEY_CACHE_SIZE: +- return isSetKey_cache_size(); +- case ROW_CACHE_SAVE_PERIOD_IN_SECONDS: +- return isSetRow_cache_save_period_in_seconds(); +- case KEY_CACHE_SAVE_PERIOD_IN_SECONDS: +- return isSetKey_cache_save_period_in_seconds(); +- case MEMTABLE_FLUSH_AFTER_MINS: +- return isSetMemtable_flush_after_mins(); +- case MEMTABLE_THROUGHPUT_IN_MB: +- return isSetMemtable_throughput_in_mb(); +- case MEMTABLE_OPERATIONS_IN_MILLIONS: +- return isSetMemtable_operations_in_millions(); +- case REPLICATE_ON_WRITE: +- return isSetReplicate_on_write(); +- case MERGE_SHARDS_CHANCE: +- return isSetMerge_shards_chance(); +- case ROW_CACHE_PROVIDER: +- return isSetRow_cache_provider(); +- case ROW_CACHE_KEYS_TO_SAVE: +- return isSetRow_cache_keys_to_save(); +- case POPULATE_IO_CACHE_ON_FLUSH: +- return isSetPopulate_io_cache_on_flush(); +- case INDEX_INTERVAL: +- return isSetIndex_interval(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof CfDef) +- return this.equals((CfDef)that); +- return false; +- } +- +- public boolean equals(CfDef that) { +- if 
(that == null) +- return false; +- +- boolean this_present_keyspace = true && this.isSetKeyspace(); +- boolean that_present_keyspace = true && that.isSetKeyspace(); +- if (this_present_keyspace || that_present_keyspace) { +- if (!(this_present_keyspace && that_present_keyspace)) +- return false; +- if (!this.keyspace.equals(that.keyspace)) +- return false; +- } +- +- boolean this_present_name = true && this.isSetName(); +- boolean that_present_name = true && that.isSetName(); +- if (this_present_name || that_present_name) { +- if (!(this_present_name && that_present_name)) +- return false; +- if (!this.name.equals(that.name)) +- return false; +- } +- +- boolean this_present_column_type = true && this.isSetColumn_type(); +- boolean that_present_column_type = true && that.isSetColumn_type(); +- if (this_present_column_type || that_present_column_type) { +- if (!(this_present_column_type && that_present_column_type)) +- return false; +- if (!this.column_type.equals(that.column_type)) +- return false; +- } +- +- boolean this_present_comparator_type = true && this.isSetComparator_type(); +- boolean that_present_comparator_type = true && that.isSetComparator_type(); +- if (this_present_comparator_type || that_present_comparator_type) { +- if (!(this_present_comparator_type && that_present_comparator_type)) +- return false; +- if (!this.comparator_type.equals(that.comparator_type)) +- return false; +- } +- +- boolean this_present_subcomparator_type = true && this.isSetSubcomparator_type(); +- boolean that_present_subcomparator_type = true && that.isSetSubcomparator_type(); +- if (this_present_subcomparator_type || that_present_subcomparator_type) { +- if (!(this_present_subcomparator_type && that_present_subcomparator_type)) +- return false; +- if (!this.subcomparator_type.equals(that.subcomparator_type)) +- return false; +- } +- +- boolean this_present_comment = true && this.isSetComment(); +- boolean that_present_comment = true && that.isSetComment(); +- if 
(this_present_comment || that_present_comment) { +- if (!(this_present_comment && that_present_comment)) +- return false; +- if (!this.comment.equals(that.comment)) +- return false; +- } +- +- boolean this_present_read_repair_chance = true && this.isSetRead_repair_chance(); +- boolean that_present_read_repair_chance = true && that.isSetRead_repair_chance(); +- if (this_present_read_repair_chance || that_present_read_repair_chance) { +- if (!(this_present_read_repair_chance && that_present_read_repair_chance)) +- return false; +- if (this.read_repair_chance != that.read_repair_chance) +- return false; +- } +- +- boolean this_present_column_metadata = true && this.isSetColumn_metadata(); +- boolean that_present_column_metadata = true && that.isSetColumn_metadata(); +- if (this_present_column_metadata || that_present_column_metadata) { +- if (!(this_present_column_metadata && that_present_column_metadata)) +- return false; +- if (!this.column_metadata.equals(that.column_metadata)) +- return false; +- } +- +- boolean this_present_gc_grace_seconds = true && this.isSetGc_grace_seconds(); +- boolean that_present_gc_grace_seconds = true && that.isSetGc_grace_seconds(); +- if (this_present_gc_grace_seconds || that_present_gc_grace_seconds) { +- if (!(this_present_gc_grace_seconds && that_present_gc_grace_seconds)) +- return false; +- if (this.gc_grace_seconds != that.gc_grace_seconds) +- return false; +- } +- +- boolean this_present_default_validation_class = true && this.isSetDefault_validation_class(); +- boolean that_present_default_validation_class = true && that.isSetDefault_validation_class(); +- if (this_present_default_validation_class || that_present_default_validation_class) { +- if (!(this_present_default_validation_class && that_present_default_validation_class)) +- return false; +- if (!this.default_validation_class.equals(that.default_validation_class)) +- return false; +- } +- +- boolean this_present_id = true && this.isSetId(); +- boolean that_present_id = 
true && that.isSetId(); +- if (this_present_id || that_present_id) { +- if (!(this_present_id && that_present_id)) +- return false; +- if (this.id != that.id) +- return false; +- } +- +- boolean this_present_min_compaction_threshold = true && this.isSetMin_compaction_threshold(); +- boolean that_present_min_compaction_threshold = true && that.isSetMin_compaction_threshold(); +- if (this_present_min_compaction_threshold || that_present_min_compaction_threshold) { +- if (!(this_present_min_compaction_threshold && that_present_min_compaction_threshold)) +- return false; +- if (this.min_compaction_threshold != that.min_compaction_threshold) +- return false; +- } +- +- boolean this_present_max_compaction_threshold = true && this.isSetMax_compaction_threshold(); +- boolean that_present_max_compaction_threshold = true && that.isSetMax_compaction_threshold(); +- if (this_present_max_compaction_threshold || that_present_max_compaction_threshold) { +- if (!(this_present_max_compaction_threshold && that_present_max_compaction_threshold)) +- return false; +- if (this.max_compaction_threshold != that.max_compaction_threshold) +- return false; +- } +- +- boolean this_present_key_validation_class = true && this.isSetKey_validation_class(); +- boolean that_present_key_validation_class = true && that.isSetKey_validation_class(); +- if (this_present_key_validation_class || that_present_key_validation_class) { +- if (!(this_present_key_validation_class && that_present_key_validation_class)) +- return false; +- if (!this.key_validation_class.equals(that.key_validation_class)) +- return false; +- } +- +- boolean this_present_key_alias = true && this.isSetKey_alias(); +- boolean that_present_key_alias = true && that.isSetKey_alias(); +- if (this_present_key_alias || that_present_key_alias) { +- if (!(this_present_key_alias && that_present_key_alias)) +- return false; +- if (!this.key_alias.equals(that.key_alias)) +- return false; +- } +- +- boolean this_present_compaction_strategy = 
true && this.isSetCompaction_strategy(); +- boolean that_present_compaction_strategy = true && that.isSetCompaction_strategy(); +- if (this_present_compaction_strategy || that_present_compaction_strategy) { +- if (!(this_present_compaction_strategy && that_present_compaction_strategy)) +- return false; +- if (!this.compaction_strategy.equals(that.compaction_strategy)) +- return false; +- } +- +- boolean this_present_compaction_strategy_options = true && this.isSetCompaction_strategy_options(); +- boolean that_present_compaction_strategy_options = true && that.isSetCompaction_strategy_options(); +- if (this_present_compaction_strategy_options || that_present_compaction_strategy_options) { +- if (!(this_present_compaction_strategy_options && that_present_compaction_strategy_options)) +- return false; +- if (!this.compaction_strategy_options.equals(that.compaction_strategy_options)) +- return false; +- } +- +- boolean this_present_compression_options = true && this.isSetCompression_options(); +- boolean that_present_compression_options = true && that.isSetCompression_options(); +- if (this_present_compression_options || that_present_compression_options) { +- if (!(this_present_compression_options && that_present_compression_options)) +- return false; +- if (!this.compression_options.equals(that.compression_options)) +- return false; +- } +- +- boolean this_present_bloom_filter_fp_chance = true && this.isSetBloom_filter_fp_chance(); +- boolean that_present_bloom_filter_fp_chance = true && that.isSetBloom_filter_fp_chance(); +- if (this_present_bloom_filter_fp_chance || that_present_bloom_filter_fp_chance) { +- if (!(this_present_bloom_filter_fp_chance && that_present_bloom_filter_fp_chance)) +- return false; +- if (this.bloom_filter_fp_chance != that.bloom_filter_fp_chance) +- return false; +- } +- +- boolean this_present_caching = true && this.isSetCaching(); +- boolean that_present_caching = true && that.isSetCaching(); +- if (this_present_caching || 
that_present_caching) { +- if (!(this_present_caching && that_present_caching)) +- return false; +- if (!this.caching.equals(that.caching)) +- return false; +- } +- +- boolean this_present_dclocal_read_repair_chance = true && this.isSetDclocal_read_repair_chance(); +- boolean that_present_dclocal_read_repair_chance = true && that.isSetDclocal_read_repair_chance(); +- if (this_present_dclocal_read_repair_chance || that_present_dclocal_read_repair_chance) { +- if (!(this_present_dclocal_read_repair_chance && that_present_dclocal_read_repair_chance)) +- return false; +- if (this.dclocal_read_repair_chance != that.dclocal_read_repair_chance) +- return false; +- } +- +- boolean this_present_memtable_flush_period_in_ms = true && this.isSetMemtable_flush_period_in_ms(); +- boolean that_present_memtable_flush_period_in_ms = true && that.isSetMemtable_flush_period_in_ms(); +- if (this_present_memtable_flush_period_in_ms || that_present_memtable_flush_period_in_ms) { +- if (!(this_present_memtable_flush_period_in_ms && that_present_memtable_flush_period_in_ms)) +- return false; +- if (this.memtable_flush_period_in_ms != that.memtable_flush_period_in_ms) +- return false; +- } +- +- boolean this_present_default_time_to_live = true && this.isSetDefault_time_to_live(); +- boolean that_present_default_time_to_live = true && that.isSetDefault_time_to_live(); +- if (this_present_default_time_to_live || that_present_default_time_to_live) { +- if (!(this_present_default_time_to_live && that_present_default_time_to_live)) +- return false; +- if (this.default_time_to_live != that.default_time_to_live) +- return false; +- } +- +- boolean this_present_speculative_retry = true && this.isSetSpeculative_retry(); +- boolean that_present_speculative_retry = true && that.isSetSpeculative_retry(); +- if (this_present_speculative_retry || that_present_speculative_retry) { +- if (!(this_present_speculative_retry && that_present_speculative_retry)) +- return false; +- if 
(!this.speculative_retry.equals(that.speculative_retry)) +- return false; +- } +- +- boolean this_present_triggers = true && this.isSetTriggers(); +- boolean that_present_triggers = true && that.isSetTriggers(); +- if (this_present_triggers || that_present_triggers) { +- if (!(this_present_triggers && that_present_triggers)) +- return false; +- if (!this.triggers.equals(that.triggers)) +- return false; +- } +- +- boolean this_present_cells_per_row_to_cache = true && this.isSetCells_per_row_to_cache(); +- boolean that_present_cells_per_row_to_cache = true && that.isSetCells_per_row_to_cache(); +- if (this_present_cells_per_row_to_cache || that_present_cells_per_row_to_cache) { +- if (!(this_present_cells_per_row_to_cache && that_present_cells_per_row_to_cache)) +- return false; +- if (!this.cells_per_row_to_cache.equals(that.cells_per_row_to_cache)) +- return false; +- } +- +- boolean this_present_min_index_interval = true && this.isSetMin_index_interval(); +- boolean that_present_min_index_interval = true && that.isSetMin_index_interval(); +- if (this_present_min_index_interval || that_present_min_index_interval) { +- if (!(this_present_min_index_interval && that_present_min_index_interval)) +- return false; +- if (this.min_index_interval != that.min_index_interval) +- return false; +- } +- +- boolean this_present_max_index_interval = true && this.isSetMax_index_interval(); +- boolean that_present_max_index_interval = true && that.isSetMax_index_interval(); +- if (this_present_max_index_interval || that_present_max_index_interval) { +- if (!(this_present_max_index_interval && that_present_max_index_interval)) +- return false; +- if (this.max_index_interval != that.max_index_interval) +- return false; +- } +- +- boolean this_present_row_cache_size = true && this.isSetRow_cache_size(); +- boolean that_present_row_cache_size = true && that.isSetRow_cache_size(); +- if (this_present_row_cache_size || that_present_row_cache_size) { +- if (!(this_present_row_cache_size 
&& that_present_row_cache_size)) +- return false; +- if (this.row_cache_size != that.row_cache_size) +- return false; +- } +- +- boolean this_present_key_cache_size = true && this.isSetKey_cache_size(); +- boolean that_present_key_cache_size = true && that.isSetKey_cache_size(); +- if (this_present_key_cache_size || that_present_key_cache_size) { +- if (!(this_present_key_cache_size && that_present_key_cache_size)) +- return false; +- if (this.key_cache_size != that.key_cache_size) +- return false; +- } +- +- boolean this_present_row_cache_save_period_in_seconds = true && this.isSetRow_cache_save_period_in_seconds(); +- boolean that_present_row_cache_save_period_in_seconds = true && that.isSetRow_cache_save_period_in_seconds(); +- if (this_present_row_cache_save_period_in_seconds || that_present_row_cache_save_period_in_seconds) { +- if (!(this_present_row_cache_save_period_in_seconds && that_present_row_cache_save_period_in_seconds)) +- return false; +- if (this.row_cache_save_period_in_seconds != that.row_cache_save_period_in_seconds) +- return false; +- } +- +- boolean this_present_key_cache_save_period_in_seconds = true && this.isSetKey_cache_save_period_in_seconds(); +- boolean that_present_key_cache_save_period_in_seconds = true && that.isSetKey_cache_save_period_in_seconds(); +- if (this_present_key_cache_save_period_in_seconds || that_present_key_cache_save_period_in_seconds) { +- if (!(this_present_key_cache_save_period_in_seconds && that_present_key_cache_save_period_in_seconds)) +- return false; +- if (this.key_cache_save_period_in_seconds != that.key_cache_save_period_in_seconds) +- return false; +- } +- +- boolean this_present_memtable_flush_after_mins = true && this.isSetMemtable_flush_after_mins(); +- boolean that_present_memtable_flush_after_mins = true && that.isSetMemtable_flush_after_mins(); +- if (this_present_memtable_flush_after_mins || that_present_memtable_flush_after_mins) { +- if (!(this_present_memtable_flush_after_mins && 
that_present_memtable_flush_after_mins)) +- return false; +- if (this.memtable_flush_after_mins != that.memtable_flush_after_mins) +- return false; +- } +- +- boolean this_present_memtable_throughput_in_mb = true && this.isSetMemtable_throughput_in_mb(); +- boolean that_present_memtable_throughput_in_mb = true && that.isSetMemtable_throughput_in_mb(); +- if (this_present_memtable_throughput_in_mb || that_present_memtable_throughput_in_mb) { +- if (!(this_present_memtable_throughput_in_mb && that_present_memtable_throughput_in_mb)) +- return false; +- if (this.memtable_throughput_in_mb != that.memtable_throughput_in_mb) +- return false; +- } +- +- boolean this_present_memtable_operations_in_millions = true && this.isSetMemtable_operations_in_millions(); +- boolean that_present_memtable_operations_in_millions = true && that.isSetMemtable_operations_in_millions(); +- if (this_present_memtable_operations_in_millions || that_present_memtable_operations_in_millions) { +- if (!(this_present_memtable_operations_in_millions && that_present_memtable_operations_in_millions)) +- return false; +- if (this.memtable_operations_in_millions != that.memtable_operations_in_millions) +- return false; +- } +- +- boolean this_present_replicate_on_write = true && this.isSetReplicate_on_write(); +- boolean that_present_replicate_on_write = true && that.isSetReplicate_on_write(); +- if (this_present_replicate_on_write || that_present_replicate_on_write) { +- if (!(this_present_replicate_on_write && that_present_replicate_on_write)) +- return false; +- if (this.replicate_on_write != that.replicate_on_write) +- return false; +- } +- +- boolean this_present_merge_shards_chance = true && this.isSetMerge_shards_chance(); +- boolean that_present_merge_shards_chance = true && that.isSetMerge_shards_chance(); +- if (this_present_merge_shards_chance || that_present_merge_shards_chance) { +- if (!(this_present_merge_shards_chance && that_present_merge_shards_chance)) +- return false; +- if 
(this.merge_shards_chance != that.merge_shards_chance) +- return false; +- } +- +- boolean this_present_row_cache_provider = true && this.isSetRow_cache_provider(); +- boolean that_present_row_cache_provider = true && that.isSetRow_cache_provider(); +- if (this_present_row_cache_provider || that_present_row_cache_provider) { +- if (!(this_present_row_cache_provider && that_present_row_cache_provider)) +- return false; +- if (!this.row_cache_provider.equals(that.row_cache_provider)) +- return false; +- } +- +- boolean this_present_row_cache_keys_to_save = true && this.isSetRow_cache_keys_to_save(); +- boolean that_present_row_cache_keys_to_save = true && that.isSetRow_cache_keys_to_save(); +- if (this_present_row_cache_keys_to_save || that_present_row_cache_keys_to_save) { +- if (!(this_present_row_cache_keys_to_save && that_present_row_cache_keys_to_save)) +- return false; +- if (this.row_cache_keys_to_save != that.row_cache_keys_to_save) +- return false; +- } +- +- boolean this_present_populate_io_cache_on_flush = true && this.isSetPopulate_io_cache_on_flush(); +- boolean that_present_populate_io_cache_on_flush = true && that.isSetPopulate_io_cache_on_flush(); +- if (this_present_populate_io_cache_on_flush || that_present_populate_io_cache_on_flush) { +- if (!(this_present_populate_io_cache_on_flush && that_present_populate_io_cache_on_flush)) +- return false; +- if (this.populate_io_cache_on_flush != that.populate_io_cache_on_flush) +- return false; +- } +- +- boolean this_present_index_interval = true && this.isSetIndex_interval(); +- boolean that_present_index_interval = true && that.isSetIndex_interval(); +- if (this_present_index_interval || that_present_index_interval) { +- if (!(this_present_index_interval && that_present_index_interval)) +- return false; +- if (this.index_interval != that.index_interval) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean 
present_keyspace = true && (isSetKeyspace()); +- builder.append(present_keyspace); +- if (present_keyspace) +- builder.append(keyspace); +- +- boolean present_name = true && (isSetName()); +- builder.append(present_name); +- if (present_name) +- builder.append(name); +- +- boolean present_column_type = true && (isSetColumn_type()); +- builder.append(present_column_type); +- if (present_column_type) +- builder.append(column_type); +- +- boolean present_comparator_type = true && (isSetComparator_type()); +- builder.append(present_comparator_type); +- if (present_comparator_type) +- builder.append(comparator_type); +- +- boolean present_subcomparator_type = true && (isSetSubcomparator_type()); +- builder.append(present_subcomparator_type); +- if (present_subcomparator_type) +- builder.append(subcomparator_type); +- +- boolean present_comment = true && (isSetComment()); +- builder.append(present_comment); +- if (present_comment) +- builder.append(comment); +- +- boolean present_read_repair_chance = true && (isSetRead_repair_chance()); +- builder.append(present_read_repair_chance); +- if (present_read_repair_chance) +- builder.append(read_repair_chance); +- +- boolean present_column_metadata = true && (isSetColumn_metadata()); +- builder.append(present_column_metadata); +- if (present_column_metadata) +- builder.append(column_metadata); +- +- boolean present_gc_grace_seconds = true && (isSetGc_grace_seconds()); +- builder.append(present_gc_grace_seconds); +- if (present_gc_grace_seconds) +- builder.append(gc_grace_seconds); +- +- boolean present_default_validation_class = true && (isSetDefault_validation_class()); +- builder.append(present_default_validation_class); +- if (present_default_validation_class) +- builder.append(default_validation_class); +- +- boolean present_id = true && (isSetId()); +- builder.append(present_id); +- if (present_id) +- builder.append(id); +- +- boolean present_min_compaction_threshold = true && (isSetMin_compaction_threshold()); +- 
builder.append(present_min_compaction_threshold); +- if (present_min_compaction_threshold) +- builder.append(min_compaction_threshold); +- +- boolean present_max_compaction_threshold = true && (isSetMax_compaction_threshold()); +- builder.append(present_max_compaction_threshold); +- if (present_max_compaction_threshold) +- builder.append(max_compaction_threshold); +- +- boolean present_key_validation_class = true && (isSetKey_validation_class()); +- builder.append(present_key_validation_class); +- if (present_key_validation_class) +- builder.append(key_validation_class); +- +- boolean present_key_alias = true && (isSetKey_alias()); +- builder.append(present_key_alias); +- if (present_key_alias) +- builder.append(key_alias); +- +- boolean present_compaction_strategy = true && (isSetCompaction_strategy()); +- builder.append(present_compaction_strategy); +- if (present_compaction_strategy) +- builder.append(compaction_strategy); +- +- boolean present_compaction_strategy_options = true && (isSetCompaction_strategy_options()); +- builder.append(present_compaction_strategy_options); +- if (present_compaction_strategy_options) +- builder.append(compaction_strategy_options); +- +- boolean present_compression_options = true && (isSetCompression_options()); +- builder.append(present_compression_options); +- if (present_compression_options) +- builder.append(compression_options); +- +- boolean present_bloom_filter_fp_chance = true && (isSetBloom_filter_fp_chance()); +- builder.append(present_bloom_filter_fp_chance); +- if (present_bloom_filter_fp_chance) +- builder.append(bloom_filter_fp_chance); +- +- boolean present_caching = true && (isSetCaching()); +- builder.append(present_caching); +- if (present_caching) +- builder.append(caching); +- +- boolean present_dclocal_read_repair_chance = true && (isSetDclocal_read_repair_chance()); +- builder.append(present_dclocal_read_repair_chance); +- if (present_dclocal_read_repair_chance) +- builder.append(dclocal_read_repair_chance); 
+- +- boolean present_memtable_flush_period_in_ms = true && (isSetMemtable_flush_period_in_ms()); +- builder.append(present_memtable_flush_period_in_ms); +- if (present_memtable_flush_period_in_ms) +- builder.append(memtable_flush_period_in_ms); +- +- boolean present_default_time_to_live = true && (isSetDefault_time_to_live()); +- builder.append(present_default_time_to_live); +- if (present_default_time_to_live) +- builder.append(default_time_to_live); +- +- boolean present_speculative_retry = true && (isSetSpeculative_retry()); +- builder.append(present_speculative_retry); +- if (present_speculative_retry) +- builder.append(speculative_retry); +- +- boolean present_triggers = true && (isSetTriggers()); +- builder.append(present_triggers); +- if (present_triggers) +- builder.append(triggers); +- +- boolean present_cells_per_row_to_cache = true && (isSetCells_per_row_to_cache()); +- builder.append(present_cells_per_row_to_cache); +- if (present_cells_per_row_to_cache) +- builder.append(cells_per_row_to_cache); +- +- boolean present_min_index_interval = true && (isSetMin_index_interval()); +- builder.append(present_min_index_interval); +- if (present_min_index_interval) +- builder.append(min_index_interval); +- +- boolean present_max_index_interval = true && (isSetMax_index_interval()); +- builder.append(present_max_index_interval); +- if (present_max_index_interval) +- builder.append(max_index_interval); +- +- boolean present_row_cache_size = true && (isSetRow_cache_size()); +- builder.append(present_row_cache_size); +- if (present_row_cache_size) +- builder.append(row_cache_size); +- +- boolean present_key_cache_size = true && (isSetKey_cache_size()); +- builder.append(present_key_cache_size); +- if (present_key_cache_size) +- builder.append(key_cache_size); +- +- boolean present_row_cache_save_period_in_seconds = true && (isSetRow_cache_save_period_in_seconds()); +- builder.append(present_row_cache_save_period_in_seconds); +- if 
(present_row_cache_save_period_in_seconds) +- builder.append(row_cache_save_period_in_seconds); +- +- boolean present_key_cache_save_period_in_seconds = true && (isSetKey_cache_save_period_in_seconds()); +- builder.append(present_key_cache_save_period_in_seconds); +- if (present_key_cache_save_period_in_seconds) +- builder.append(key_cache_save_period_in_seconds); +- +- boolean present_memtable_flush_after_mins = true && (isSetMemtable_flush_after_mins()); +- builder.append(present_memtable_flush_after_mins); +- if (present_memtable_flush_after_mins) +- builder.append(memtable_flush_after_mins); +- +- boolean present_memtable_throughput_in_mb = true && (isSetMemtable_throughput_in_mb()); +- builder.append(present_memtable_throughput_in_mb); +- if (present_memtable_throughput_in_mb) +- builder.append(memtable_throughput_in_mb); +- +- boolean present_memtable_operations_in_millions = true && (isSetMemtable_operations_in_millions()); +- builder.append(present_memtable_operations_in_millions); +- if (present_memtable_operations_in_millions) +- builder.append(memtable_operations_in_millions); +- +- boolean present_replicate_on_write = true && (isSetReplicate_on_write()); +- builder.append(present_replicate_on_write); +- if (present_replicate_on_write) +- builder.append(replicate_on_write); +- +- boolean present_merge_shards_chance = true && (isSetMerge_shards_chance()); +- builder.append(present_merge_shards_chance); +- if (present_merge_shards_chance) +- builder.append(merge_shards_chance); +- +- boolean present_row_cache_provider = true && (isSetRow_cache_provider()); +- builder.append(present_row_cache_provider); +- if (present_row_cache_provider) +- builder.append(row_cache_provider); +- +- boolean present_row_cache_keys_to_save = true && (isSetRow_cache_keys_to_save()); +- builder.append(present_row_cache_keys_to_save); +- if (present_row_cache_keys_to_save) +- builder.append(row_cache_keys_to_save); +- +- boolean present_populate_io_cache_on_flush = true && 
(isSetPopulate_io_cache_on_flush()); +- builder.append(present_populate_io_cache_on_flush); +- if (present_populate_io_cache_on_flush) +- builder.append(populate_io_cache_on_flush); +- +- boolean present_index_interval = true && (isSetIndex_interval()); +- builder.append(present_index_interval); +- if (present_index_interval) +- builder.append(index_interval); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(CfDef other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetKeyspace()).compareTo(other.isSetKeyspace()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKeyspace()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.keyspace, other.keyspace); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetName()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetColumn_type()).compareTo(other.isSetColumn_type()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumn_type()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_type, other.column_type); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetComparator_type()).compareTo(other.isSetComparator_type()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetComparator_type()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.comparator_type, other.comparator_type); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = 
Boolean.valueOf(isSetSubcomparator_type()).compareTo(other.isSetSubcomparator_type()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSubcomparator_type()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.subcomparator_type, other.subcomparator_type); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetComment()).compareTo(other.isSetComment()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetComment()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.comment, other.comment); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetRead_repair_chance()).compareTo(other.isSetRead_repair_chance()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetRead_repair_chance()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.read_repair_chance, other.read_repair_chance); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetColumn_metadata()).compareTo(other.isSetColumn_metadata()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumn_metadata()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_metadata, other.column_metadata); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetGc_grace_seconds()).compareTo(other.isSetGc_grace_seconds()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetGc_grace_seconds()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.gc_grace_seconds, other.gc_grace_seconds); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetDefault_validation_class()).compareTo(other.isSetDefault_validation_class()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if 
(isSetDefault_validation_class()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.default_validation_class, other.default_validation_class); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetId()).compareTo(other.isSetId()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetId()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.id, other.id); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetMin_compaction_threshold()).compareTo(other.isSetMin_compaction_threshold()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetMin_compaction_threshold()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.min_compaction_threshold, other.min_compaction_threshold); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetMax_compaction_threshold()).compareTo(other.isSetMax_compaction_threshold()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetMax_compaction_threshold()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.max_compaction_threshold, other.max_compaction_threshold); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetKey_validation_class()).compareTo(other.isSetKey_validation_class()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKey_validation_class()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key_validation_class, other.key_validation_class); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetKey_alias()).compareTo(other.isSetKey_alias()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKey_alias()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key_alias, 
other.key_alias); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetCompaction_strategy()).compareTo(other.isSetCompaction_strategy()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetCompaction_strategy()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.compaction_strategy, other.compaction_strategy); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetCompaction_strategy_options()).compareTo(other.isSetCompaction_strategy_options()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetCompaction_strategy_options()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.compaction_strategy_options, other.compaction_strategy_options); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetCompression_options()).compareTo(other.isSetCompression_options()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetCompression_options()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.compression_options, other.compression_options); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetBloom_filter_fp_chance()).compareTo(other.isSetBloom_filter_fp_chance()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetBloom_filter_fp_chance()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.bloom_filter_fp_chance, other.bloom_filter_fp_chance); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetCaching()).compareTo(other.isSetCaching()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetCaching()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.caching, other.caching); +- if (lastComparison != 0) { +- return 
lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetDclocal_read_repair_chance()).compareTo(other.isSetDclocal_read_repair_chance()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetDclocal_read_repair_chance()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dclocal_read_repair_chance, other.dclocal_read_repair_chance); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetMemtable_flush_period_in_ms()).compareTo(other.isSetMemtable_flush_period_in_ms()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetMemtable_flush_period_in_ms()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.memtable_flush_period_in_ms, other.memtable_flush_period_in_ms); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetDefault_time_to_live()).compareTo(other.isSetDefault_time_to_live()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetDefault_time_to_live()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.default_time_to_live, other.default_time_to_live); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetSpeculative_retry()).compareTo(other.isSetSpeculative_retry()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSpeculative_retry()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.speculative_retry, other.speculative_retry); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetTriggers()).compareTo(other.isSetTriggers()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetTriggers()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.triggers, other.triggers); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = 
Boolean.valueOf(isSetCells_per_row_to_cache()).compareTo(other.isSetCells_per_row_to_cache()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetCells_per_row_to_cache()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cells_per_row_to_cache, other.cells_per_row_to_cache); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetMin_index_interval()).compareTo(other.isSetMin_index_interval()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetMin_index_interval()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.min_index_interval, other.min_index_interval); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetMax_index_interval()).compareTo(other.isSetMax_index_interval()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetMax_index_interval()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.max_index_interval, other.max_index_interval); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetRow_cache_size()).compareTo(other.isSetRow_cache_size()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetRow_cache_size()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.row_cache_size, other.row_cache_size); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetKey_cache_size()).compareTo(other.isSetKey_cache_size()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKey_cache_size()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key_cache_size, other.key_cache_size); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = 
Boolean.valueOf(isSetRow_cache_save_period_in_seconds()).compareTo(other.isSetRow_cache_save_period_in_seconds()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetRow_cache_save_period_in_seconds()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.row_cache_save_period_in_seconds, other.row_cache_save_period_in_seconds); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetKey_cache_save_period_in_seconds()).compareTo(other.isSetKey_cache_save_period_in_seconds()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKey_cache_save_period_in_seconds()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key_cache_save_period_in_seconds, other.key_cache_save_period_in_seconds); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetMemtable_flush_after_mins()).compareTo(other.isSetMemtable_flush_after_mins()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetMemtable_flush_after_mins()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.memtable_flush_after_mins, other.memtable_flush_after_mins); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetMemtable_throughput_in_mb()).compareTo(other.isSetMemtable_throughput_in_mb()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetMemtable_throughput_in_mb()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.memtable_throughput_in_mb, other.memtable_throughput_in_mb); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetMemtable_operations_in_millions()).compareTo(other.isSetMemtable_operations_in_millions()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetMemtable_operations_in_millions()) { +- lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.memtable_operations_in_millions, other.memtable_operations_in_millions); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetReplicate_on_write()).compareTo(other.isSetReplicate_on_write()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetReplicate_on_write()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.replicate_on_write, other.replicate_on_write); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetMerge_shards_chance()).compareTo(other.isSetMerge_shards_chance()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetMerge_shards_chance()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.merge_shards_chance, other.merge_shards_chance); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetRow_cache_provider()).compareTo(other.isSetRow_cache_provider()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetRow_cache_provider()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.row_cache_provider, other.row_cache_provider); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetRow_cache_keys_to_save()).compareTo(other.isSetRow_cache_keys_to_save()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetRow_cache_keys_to_save()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.row_cache_keys_to_save, other.row_cache_keys_to_save); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetPopulate_io_cache_on_flush()).compareTo(other.isSetPopulate_io_cache_on_flush()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetPopulate_io_cache_on_flush()) { +- lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.populate_io_cache_on_flush, other.populate_io_cache_on_flush); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIndex_interval()).compareTo(other.isSetIndex_interval()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIndex_interval()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.index_interval, other.index_interval); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("CfDef("); +- boolean first = true; +- +- sb.append("keyspace:"); +- if (this.keyspace == null) { +- sb.append("null"); +- } else { +- sb.append(this.keyspace); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("name:"); +- if (this.name == null) { +- sb.append("null"); +- } else { +- sb.append(this.name); +- } +- first = false; +- if (isSetColumn_type()) { +- if (!first) sb.append(", "); +- sb.append("column_type:"); +- if (this.column_type == null) { +- sb.append("null"); +- } else { +- sb.append(this.column_type); +- } +- first = false; +- } +- if (isSetComparator_type()) { +- if (!first) sb.append(", "); +- sb.append("comparator_type:"); +- if (this.comparator_type == null) { +- sb.append("null"); +- } else { +- sb.append(this.comparator_type); +- } +- first = false; +- } +- if (isSetSubcomparator_type()) { +- if (!first) sb.append(", "); +- sb.append("subcomparator_type:"); +- 
if (this.subcomparator_type == null) { +- sb.append("null"); +- } else { +- sb.append(this.subcomparator_type); +- } +- first = false; +- } +- if (isSetComment()) { +- if (!first) sb.append(", "); +- sb.append("comment:"); +- if (this.comment == null) { +- sb.append("null"); +- } else { +- sb.append(this.comment); +- } +- first = false; +- } +- if (isSetRead_repair_chance()) { +- if (!first) sb.append(", "); +- sb.append("read_repair_chance:"); +- sb.append(this.read_repair_chance); +- first = false; +- } +- if (isSetColumn_metadata()) { +- if (!first) sb.append(", "); +- sb.append("column_metadata:"); +- if (this.column_metadata == null) { +- sb.append("null"); +- } else { +- sb.append(this.column_metadata); +- } +- first = false; +- } +- if (isSetGc_grace_seconds()) { +- if (!first) sb.append(", "); +- sb.append("gc_grace_seconds:"); +- sb.append(this.gc_grace_seconds); +- first = false; +- } +- if (isSetDefault_validation_class()) { +- if (!first) sb.append(", "); +- sb.append("default_validation_class:"); +- if (this.default_validation_class == null) { +- sb.append("null"); +- } else { +- sb.append(this.default_validation_class); +- } +- first = false; +- } +- if (isSetId()) { +- if (!first) sb.append(", "); +- sb.append("id:"); +- sb.append(this.id); +- first = false; +- } +- if (isSetMin_compaction_threshold()) { +- if (!first) sb.append(", "); +- sb.append("min_compaction_threshold:"); +- sb.append(this.min_compaction_threshold); +- first = false; +- } +- if (isSetMax_compaction_threshold()) { +- if (!first) sb.append(", "); +- sb.append("max_compaction_threshold:"); +- sb.append(this.max_compaction_threshold); +- first = false; +- } +- if (isSetKey_validation_class()) { +- if (!first) sb.append(", "); +- sb.append("key_validation_class:"); +- if (this.key_validation_class == null) { +- sb.append("null"); +- } else { +- sb.append(this.key_validation_class); +- } +- first = false; +- } +- if (isSetKey_alias()) { +- if (!first) sb.append(", "); +- 
sb.append("key_alias:"); +- if (this.key_alias == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.key_alias, sb); +- } +- first = false; +- } +- if (isSetCompaction_strategy()) { +- if (!first) sb.append(", "); +- sb.append("compaction_strategy:"); +- if (this.compaction_strategy == null) { +- sb.append("null"); +- } else { +- sb.append(this.compaction_strategy); +- } +- first = false; +- } +- if (isSetCompaction_strategy_options()) { +- if (!first) sb.append(", "); +- sb.append("compaction_strategy_options:"); +- if (this.compaction_strategy_options == null) { +- sb.append("null"); +- } else { +- sb.append(this.compaction_strategy_options); +- } +- first = false; +- } +- if (isSetCompression_options()) { +- if (!first) sb.append(", "); +- sb.append("compression_options:"); +- if (this.compression_options == null) { +- sb.append("null"); +- } else { +- sb.append(this.compression_options); +- } +- first = false; +- } +- if (isSetBloom_filter_fp_chance()) { +- if (!first) sb.append(", "); +- sb.append("bloom_filter_fp_chance:"); +- sb.append(this.bloom_filter_fp_chance); +- first = false; +- } +- if (isSetCaching()) { +- if (!first) sb.append(", "); +- sb.append("caching:"); +- if (this.caching == null) { +- sb.append("null"); +- } else { +- sb.append(this.caching); +- } +- first = false; +- } +- if (isSetDclocal_read_repair_chance()) { +- if (!first) sb.append(", "); +- sb.append("dclocal_read_repair_chance:"); +- sb.append(this.dclocal_read_repair_chance); +- first = false; +- } +- if (isSetMemtable_flush_period_in_ms()) { +- if (!first) sb.append(", "); +- sb.append("memtable_flush_period_in_ms:"); +- sb.append(this.memtable_flush_period_in_ms); +- first = false; +- } +- if (isSetDefault_time_to_live()) { +- if (!first) sb.append(", "); +- sb.append("default_time_to_live:"); +- sb.append(this.default_time_to_live); +- first = false; +- } +- if (isSetSpeculative_retry()) { +- if (!first) sb.append(", "); +- 
sb.append("speculative_retry:"); +- if (this.speculative_retry == null) { +- sb.append("null"); +- } else { +- sb.append(this.speculative_retry); +- } +- first = false; +- } +- if (isSetTriggers()) { +- if (!first) sb.append(", "); +- sb.append("triggers:"); +- if (this.triggers == null) { +- sb.append("null"); +- } else { +- sb.append(this.triggers); +- } +- first = false; +- } +- if (isSetCells_per_row_to_cache()) { +- if (!first) sb.append(", "); +- sb.append("cells_per_row_to_cache:"); +- if (this.cells_per_row_to_cache == null) { +- sb.append("null"); +- } else { +- sb.append(this.cells_per_row_to_cache); +- } +- first = false; +- } +- if (isSetMin_index_interval()) { +- if (!first) sb.append(", "); +- sb.append("min_index_interval:"); +- sb.append(this.min_index_interval); +- first = false; +- } +- if (isSetMax_index_interval()) { +- if (!first) sb.append(", "); +- sb.append("max_index_interval:"); +- sb.append(this.max_index_interval); +- first = false; +- } +- if (isSetRow_cache_size()) { +- if (!first) sb.append(", "); +- sb.append("row_cache_size:"); +- sb.append(this.row_cache_size); +- first = false; +- } +- if (isSetKey_cache_size()) { +- if (!first) sb.append(", "); +- sb.append("key_cache_size:"); +- sb.append(this.key_cache_size); +- first = false; +- } +- if (isSetRow_cache_save_period_in_seconds()) { +- if (!first) sb.append(", "); +- sb.append("row_cache_save_period_in_seconds:"); +- sb.append(this.row_cache_save_period_in_seconds); +- first = false; +- } +- if (isSetKey_cache_save_period_in_seconds()) { +- if (!first) sb.append(", "); +- sb.append("key_cache_save_period_in_seconds:"); +- sb.append(this.key_cache_save_period_in_seconds); +- first = false; +- } +- if (isSetMemtable_flush_after_mins()) { +- if (!first) sb.append(", "); +- sb.append("memtable_flush_after_mins:"); +- sb.append(this.memtable_flush_after_mins); +- first = false; +- } +- if (isSetMemtable_throughput_in_mb()) { +- if (!first) sb.append(", "); +- 
sb.append("memtable_throughput_in_mb:"); +- sb.append(this.memtable_throughput_in_mb); +- first = false; +- } +- if (isSetMemtable_operations_in_millions()) { +- if (!first) sb.append(", "); +- sb.append("memtable_operations_in_millions:"); +- sb.append(this.memtable_operations_in_millions); +- first = false; +- } +- if (isSetReplicate_on_write()) { +- if (!first) sb.append(", "); +- sb.append("replicate_on_write:"); +- sb.append(this.replicate_on_write); +- first = false; +- } +- if (isSetMerge_shards_chance()) { +- if (!first) sb.append(", "); +- sb.append("merge_shards_chance:"); +- sb.append(this.merge_shards_chance); +- first = false; +- } +- if (isSetRow_cache_provider()) { +- if (!first) sb.append(", "); +- sb.append("row_cache_provider:"); +- if (this.row_cache_provider == null) { +- sb.append("null"); +- } else { +- sb.append(this.row_cache_provider); +- } +- first = false; +- } +- if (isSetRow_cache_keys_to_save()) { +- if (!first) sb.append(", "); +- sb.append("row_cache_keys_to_save:"); +- sb.append(this.row_cache_keys_to_save); +- first = false; +- } +- if (isSetPopulate_io_cache_on_flush()) { +- if (!first) sb.append(", "); +- sb.append("populate_io_cache_on_flush:"); +- sb.append(this.populate_io_cache_on_flush); +- first = false; +- } +- if (isSetIndex_interval()) { +- if (!first) sb.append(", "); +- sb.append("index_interval:"); +- sb.append(this.index_interval); +- first = false; +- } +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (keyspace == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'keyspace' was not present! Struct: " + toString()); +- } +- if (name == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'name' was not present! 
Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. +- __isset_bitfield = 0; +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class CfDefStandardSchemeFactory implements SchemeFactory { +- public CfDefStandardScheme getScheme() { +- return new CfDefStandardScheme(); +- } +- } +- +- private static class CfDefStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, CfDef struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // KEYSPACE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.keyspace = iprot.readString(); +- struct.setKeyspaceIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // NAME +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.name = iprot.readString(); +- struct.setNameIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- 
} +- break; +- case 3: // COLUMN_TYPE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.column_type = iprot.readString(); +- struct.setColumn_typeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 5: // COMPARATOR_TYPE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.comparator_type = iprot.readString(); +- struct.setComparator_typeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 6: // SUBCOMPARATOR_TYPE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.subcomparator_type = iprot.readString(); +- struct.setSubcomparator_typeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 8: // COMMENT +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.comment = iprot.readString(); +- struct.setCommentIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 12: // READ_REPAIR_CHANCE +- if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) { +- struct.read_repair_chance = iprot.readDouble(); +- struct.setRead_repair_chanceIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 13: // COLUMN_METADATA +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list110 = iprot.readListBegin(); +- struct.column_metadata = new ArrayList(_list110.size); +- for (int _i111 = 0; _i111 < _list110.size; ++_i111) +- { +- ColumnDef _elem112; +- _elem112 = new ColumnDef(); +- _elem112.read(iprot); +- struct.column_metadata.add(_elem112); +- } +- iprot.readListEnd(); +- } +- struct.setColumn_metadataIsSet(true); +- } else { +- 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 14: // GC_GRACE_SECONDS +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.gc_grace_seconds = iprot.readI32(); +- struct.setGc_grace_secondsIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 15: // DEFAULT_VALIDATION_CLASS +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.default_validation_class = iprot.readString(); +- struct.setDefault_validation_classIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 16: // ID +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.id = iprot.readI32(); +- struct.setIdIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 17: // MIN_COMPACTION_THRESHOLD +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.min_compaction_threshold = iprot.readI32(); +- struct.setMin_compaction_thresholdIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 18: // MAX_COMPACTION_THRESHOLD +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.max_compaction_threshold = iprot.readI32(); +- struct.setMax_compaction_thresholdIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 26: // KEY_VALIDATION_CLASS +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.key_validation_class = iprot.readString(); +- struct.setKey_validation_classIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 28: // KEY_ALIAS +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.key_alias = 
iprot.readBinary(); +- struct.setKey_aliasIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 29: // COMPACTION_STRATEGY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.compaction_strategy = iprot.readString(); +- struct.setCompaction_strategyIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 30: // COMPACTION_STRATEGY_OPTIONS +- if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { +- { +- org.apache.thrift.protocol.TMap _map113 = iprot.readMapBegin(); +- struct.compaction_strategy_options = new HashMap(2*_map113.size); +- for (int _i114 = 0; _i114 < _map113.size; ++_i114) +- { +- String _key115; +- String _val116; +- _key115 = iprot.readString(); +- _val116 = iprot.readString(); +- struct.compaction_strategy_options.put(_key115, _val116); +- } +- iprot.readMapEnd(); +- } +- struct.setCompaction_strategy_optionsIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 32: // COMPRESSION_OPTIONS +- if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { +- { +- org.apache.thrift.protocol.TMap _map117 = iprot.readMapBegin(); +- struct.compression_options = new HashMap(2*_map117.size); +- for (int _i118 = 0; _i118 < _map117.size; ++_i118) +- { +- String _key119; +- String _val120; +- _key119 = iprot.readString(); +- _val120 = iprot.readString(); +- struct.compression_options.put(_key119, _val120); +- } +- iprot.readMapEnd(); +- } +- struct.setCompression_optionsIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 33: // BLOOM_FILTER_FP_CHANCE +- if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) { +- struct.bloom_filter_fp_chance = iprot.readDouble(); +- struct.setBloom_filter_fp_chanceIsSet(true); +- } else { +- 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 34: // CACHING +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.caching = iprot.readString(); +- struct.setCachingIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 37: // DCLOCAL_READ_REPAIR_CHANCE +- if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) { +- struct.dclocal_read_repair_chance = iprot.readDouble(); +- struct.setDclocal_read_repair_chanceIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 39: // MEMTABLE_FLUSH_PERIOD_IN_MS +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.memtable_flush_period_in_ms = iprot.readI32(); +- struct.setMemtable_flush_period_in_msIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 40: // DEFAULT_TIME_TO_LIVE +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.default_time_to_live = iprot.readI32(); +- struct.setDefault_time_to_liveIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 42: // SPECULATIVE_RETRY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.speculative_retry = iprot.readString(); +- struct.setSpeculative_retryIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 43: // TRIGGERS +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list121 = iprot.readListBegin(); +- struct.triggers = new ArrayList(_list121.size); +- for (int _i122 = 0; _i122 < _list121.size; ++_i122) +- { +- TriggerDef _elem123; +- _elem123 = new TriggerDef(); +- _elem123.read(iprot); +- struct.triggers.add(_elem123); +- } 
+- iprot.readListEnd(); +- } +- struct.setTriggersIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 44: // CELLS_PER_ROW_TO_CACHE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.cells_per_row_to_cache = iprot.readString(); +- struct.setCells_per_row_to_cacheIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 45: // MIN_INDEX_INTERVAL +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.min_index_interval = iprot.readI32(); +- struct.setMin_index_intervalIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 46: // MAX_INDEX_INTERVAL +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.max_index_interval = iprot.readI32(); +- struct.setMax_index_intervalIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 9: // ROW_CACHE_SIZE +- if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) { +- struct.row_cache_size = iprot.readDouble(); +- struct.setRow_cache_sizeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 11: // KEY_CACHE_SIZE +- if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) { +- struct.key_cache_size = iprot.readDouble(); +- struct.setKey_cache_sizeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 19: // ROW_CACHE_SAVE_PERIOD_IN_SECONDS +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.row_cache_save_period_in_seconds = iprot.readI32(); +- struct.setRow_cache_save_period_in_secondsIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 20: // 
KEY_CACHE_SAVE_PERIOD_IN_SECONDS +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.key_cache_save_period_in_seconds = iprot.readI32(); +- struct.setKey_cache_save_period_in_secondsIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 21: // MEMTABLE_FLUSH_AFTER_MINS +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.memtable_flush_after_mins = iprot.readI32(); +- struct.setMemtable_flush_after_minsIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 22: // MEMTABLE_THROUGHPUT_IN_MB +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.memtable_throughput_in_mb = iprot.readI32(); +- struct.setMemtable_throughput_in_mbIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 23: // MEMTABLE_OPERATIONS_IN_MILLIONS +- if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) { +- struct.memtable_operations_in_millions = iprot.readDouble(); +- struct.setMemtable_operations_in_millionsIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 24: // REPLICATE_ON_WRITE +- if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { +- struct.replicate_on_write = iprot.readBool(); +- struct.setReplicate_on_writeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 25: // MERGE_SHARDS_CHANCE +- if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) { +- struct.merge_shards_chance = iprot.readDouble(); +- struct.setMerge_shards_chanceIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 27: // ROW_CACHE_PROVIDER +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { 
+- struct.row_cache_provider = iprot.readString(); +- struct.setRow_cache_providerIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 31: // ROW_CACHE_KEYS_TO_SAVE +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.row_cache_keys_to_save = iprot.readI32(); +- struct.setRow_cache_keys_to_saveIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 38: // POPULATE_IO_CACHE_ON_FLUSH +- if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { +- struct.populate_io_cache_on_flush = iprot.readBool(); +- struct.setPopulate_io_cache_on_flushIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 41: // INDEX_INTERVAL +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.index_interval = iprot.readI32(); +- struct.setIndex_intervalIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, CfDef struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.keyspace != null) { +- oprot.writeFieldBegin(KEYSPACE_FIELD_DESC); +- oprot.writeString(struct.keyspace); +- oprot.writeFieldEnd(); +- } +- if (struct.name != null) { +- oprot.writeFieldBegin(NAME_FIELD_DESC); +- oprot.writeString(struct.name); +- oprot.writeFieldEnd(); +- } +- if (struct.column_type != null) { +- if (struct.isSetColumn_type()) { +- oprot.writeFieldBegin(COLUMN_TYPE_FIELD_DESC); +- 
oprot.writeString(struct.column_type); +- oprot.writeFieldEnd(); +- } +- } +- if (struct.comparator_type != null) { +- if (struct.isSetComparator_type()) { +- oprot.writeFieldBegin(COMPARATOR_TYPE_FIELD_DESC); +- oprot.writeString(struct.comparator_type); +- oprot.writeFieldEnd(); +- } +- } +- if (struct.subcomparator_type != null) { +- if (struct.isSetSubcomparator_type()) { +- oprot.writeFieldBegin(SUBCOMPARATOR_TYPE_FIELD_DESC); +- oprot.writeString(struct.subcomparator_type); +- oprot.writeFieldEnd(); +- } +- } +- if (struct.comment != null) { +- if (struct.isSetComment()) { +- oprot.writeFieldBegin(COMMENT_FIELD_DESC); +- oprot.writeString(struct.comment); +- oprot.writeFieldEnd(); +- } +- } +- if (struct.isSetRow_cache_size()) { +- oprot.writeFieldBegin(ROW_CACHE_SIZE_FIELD_DESC); +- oprot.writeDouble(struct.row_cache_size); +- oprot.writeFieldEnd(); +- } +- if (struct.isSetKey_cache_size()) { +- oprot.writeFieldBegin(KEY_CACHE_SIZE_FIELD_DESC); +- oprot.writeDouble(struct.key_cache_size); +- oprot.writeFieldEnd(); +- } +- if (struct.isSetRead_repair_chance()) { +- oprot.writeFieldBegin(READ_REPAIR_CHANCE_FIELD_DESC); +- oprot.writeDouble(struct.read_repair_chance); +- oprot.writeFieldEnd(); +- } +- if (struct.column_metadata != null) { +- if (struct.isSetColumn_metadata()) { +- oprot.writeFieldBegin(COLUMN_METADATA_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.column_metadata.size())); +- for (ColumnDef _iter124 : struct.column_metadata) +- { +- _iter124.write(oprot); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- } +- if (struct.isSetGc_grace_seconds()) { +- oprot.writeFieldBegin(GC_GRACE_SECONDS_FIELD_DESC); +- oprot.writeI32(struct.gc_grace_seconds); +- oprot.writeFieldEnd(); +- } +- if (struct.default_validation_class != null) { +- if (struct.isSetDefault_validation_class()) { +- oprot.writeFieldBegin(DEFAULT_VALIDATION_CLASS_FIELD_DESC); +- 
oprot.writeString(struct.default_validation_class); +- oprot.writeFieldEnd(); +- } +- } +- if (struct.isSetId()) { +- oprot.writeFieldBegin(ID_FIELD_DESC); +- oprot.writeI32(struct.id); +- oprot.writeFieldEnd(); +- } +- if (struct.isSetMin_compaction_threshold()) { +- oprot.writeFieldBegin(MIN_COMPACTION_THRESHOLD_FIELD_DESC); +- oprot.writeI32(struct.min_compaction_threshold); +- oprot.writeFieldEnd(); +- } +- if (struct.isSetMax_compaction_threshold()) { +- oprot.writeFieldBegin(MAX_COMPACTION_THRESHOLD_FIELD_DESC); +- oprot.writeI32(struct.max_compaction_threshold); +- oprot.writeFieldEnd(); +- } +- if (struct.isSetRow_cache_save_period_in_seconds()) { +- oprot.writeFieldBegin(ROW_CACHE_SAVE_PERIOD_IN_SECONDS_FIELD_DESC); +- oprot.writeI32(struct.row_cache_save_period_in_seconds); +- oprot.writeFieldEnd(); +- } +- if (struct.isSetKey_cache_save_period_in_seconds()) { +- oprot.writeFieldBegin(KEY_CACHE_SAVE_PERIOD_IN_SECONDS_FIELD_DESC); +- oprot.writeI32(struct.key_cache_save_period_in_seconds); +- oprot.writeFieldEnd(); +- } +- if (struct.isSetMemtable_flush_after_mins()) { +- oprot.writeFieldBegin(MEMTABLE_FLUSH_AFTER_MINS_FIELD_DESC); +- oprot.writeI32(struct.memtable_flush_after_mins); +- oprot.writeFieldEnd(); +- } +- if (struct.isSetMemtable_throughput_in_mb()) { +- oprot.writeFieldBegin(MEMTABLE_THROUGHPUT_IN_MB_FIELD_DESC); +- oprot.writeI32(struct.memtable_throughput_in_mb); +- oprot.writeFieldEnd(); +- } +- if (struct.isSetMemtable_operations_in_millions()) { +- oprot.writeFieldBegin(MEMTABLE_OPERATIONS_IN_MILLIONS_FIELD_DESC); +- oprot.writeDouble(struct.memtable_operations_in_millions); +- oprot.writeFieldEnd(); +- } +- if (struct.isSetReplicate_on_write()) { +- oprot.writeFieldBegin(REPLICATE_ON_WRITE_FIELD_DESC); +- oprot.writeBool(struct.replicate_on_write); +- oprot.writeFieldEnd(); +- } +- if (struct.isSetMerge_shards_chance()) { +- oprot.writeFieldBegin(MERGE_SHARDS_CHANCE_FIELD_DESC); +- oprot.writeDouble(struct.merge_shards_chance); +- 
oprot.writeFieldEnd(); +- } +- if (struct.key_validation_class != null) { +- if (struct.isSetKey_validation_class()) { +- oprot.writeFieldBegin(KEY_VALIDATION_CLASS_FIELD_DESC); +- oprot.writeString(struct.key_validation_class); +- oprot.writeFieldEnd(); +- } +- } +- if (struct.row_cache_provider != null) { +- if (struct.isSetRow_cache_provider()) { +- oprot.writeFieldBegin(ROW_CACHE_PROVIDER_FIELD_DESC); +- oprot.writeString(struct.row_cache_provider); +- oprot.writeFieldEnd(); +- } +- } +- if (struct.key_alias != null) { +- if (struct.isSetKey_alias()) { +- oprot.writeFieldBegin(KEY_ALIAS_FIELD_DESC); +- oprot.writeBinary(struct.key_alias); +- oprot.writeFieldEnd(); +- } +- } +- if (struct.compaction_strategy != null) { +- if (struct.isSetCompaction_strategy()) { +- oprot.writeFieldBegin(COMPACTION_STRATEGY_FIELD_DESC); +- oprot.writeString(struct.compaction_strategy); +- oprot.writeFieldEnd(); +- } +- } +- if (struct.compaction_strategy_options != null) { +- if (struct.isSetCompaction_strategy_options()) { +- oprot.writeFieldBegin(COMPACTION_STRATEGY_OPTIONS_FIELD_DESC); +- { +- oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.compaction_strategy_options.size())); +- for (Map.Entry _iter125 : struct.compaction_strategy_options.entrySet()) +- { +- oprot.writeString(_iter125.getKey()); +- oprot.writeString(_iter125.getValue()); +- } +- oprot.writeMapEnd(); +- } +- oprot.writeFieldEnd(); +- } +- } +- if (struct.isSetRow_cache_keys_to_save()) { +- oprot.writeFieldBegin(ROW_CACHE_KEYS_TO_SAVE_FIELD_DESC); +- oprot.writeI32(struct.row_cache_keys_to_save); +- oprot.writeFieldEnd(); +- } +- if (struct.compression_options != null) { +- if (struct.isSetCompression_options()) { +- oprot.writeFieldBegin(COMPRESSION_OPTIONS_FIELD_DESC); +- { +- oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, 
struct.compression_options.size())); +- for (Map.Entry _iter126 : struct.compression_options.entrySet()) +- { +- oprot.writeString(_iter126.getKey()); +- oprot.writeString(_iter126.getValue()); +- } +- oprot.writeMapEnd(); +- } +- oprot.writeFieldEnd(); +- } +- } +- if (struct.isSetBloom_filter_fp_chance()) { +- oprot.writeFieldBegin(BLOOM_FILTER_FP_CHANCE_FIELD_DESC); +- oprot.writeDouble(struct.bloom_filter_fp_chance); +- oprot.writeFieldEnd(); +- } +- if (struct.caching != null) { +- if (struct.isSetCaching()) { +- oprot.writeFieldBegin(CACHING_FIELD_DESC); +- oprot.writeString(struct.caching); +- oprot.writeFieldEnd(); +- } +- } +- if (struct.isSetDclocal_read_repair_chance()) { +- oprot.writeFieldBegin(DCLOCAL_READ_REPAIR_CHANCE_FIELD_DESC); +- oprot.writeDouble(struct.dclocal_read_repair_chance); +- oprot.writeFieldEnd(); +- } +- if (struct.isSetPopulate_io_cache_on_flush()) { +- oprot.writeFieldBegin(POPULATE_IO_CACHE_ON_FLUSH_FIELD_DESC); +- oprot.writeBool(struct.populate_io_cache_on_flush); +- oprot.writeFieldEnd(); +- } +- if (struct.isSetMemtable_flush_period_in_ms()) { +- oprot.writeFieldBegin(MEMTABLE_FLUSH_PERIOD_IN_MS_FIELD_DESC); +- oprot.writeI32(struct.memtable_flush_period_in_ms); +- oprot.writeFieldEnd(); +- } +- if (struct.isSetDefault_time_to_live()) { +- oprot.writeFieldBegin(DEFAULT_TIME_TO_LIVE_FIELD_DESC); +- oprot.writeI32(struct.default_time_to_live); +- oprot.writeFieldEnd(); +- } +- if (struct.isSetIndex_interval()) { +- oprot.writeFieldBegin(INDEX_INTERVAL_FIELD_DESC); +- oprot.writeI32(struct.index_interval); +- oprot.writeFieldEnd(); +- } +- if (struct.speculative_retry != null) { +- if (struct.isSetSpeculative_retry()) { +- oprot.writeFieldBegin(SPECULATIVE_RETRY_FIELD_DESC); +- oprot.writeString(struct.speculative_retry); +- oprot.writeFieldEnd(); +- } +- } +- if (struct.triggers != null) { +- if (struct.isSetTriggers()) { +- oprot.writeFieldBegin(TRIGGERS_FIELD_DESC); +- { +- oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.triggers.size())); +- for (TriggerDef _iter127 : struct.triggers) +- { +- _iter127.write(oprot); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- } +- if (struct.cells_per_row_to_cache != null) { +- if (struct.isSetCells_per_row_to_cache()) { +- oprot.writeFieldBegin(CELLS_PER_ROW_TO_CACHE_FIELD_DESC); +- oprot.writeString(struct.cells_per_row_to_cache); +- oprot.writeFieldEnd(); +- } +- } +- if (struct.isSetMin_index_interval()) { +- oprot.writeFieldBegin(MIN_INDEX_INTERVAL_FIELD_DESC); +- oprot.writeI32(struct.min_index_interval); +- oprot.writeFieldEnd(); +- } +- if (struct.isSetMax_index_interval()) { +- oprot.writeFieldBegin(MAX_INDEX_INTERVAL_FIELD_DESC); +- oprot.writeI32(struct.max_index_interval); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class CfDefTupleSchemeFactory implements SchemeFactory { +- public CfDefTupleScheme getScheme() { +- return new CfDefTupleScheme(); +- } +- } +- +- private static class CfDefTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, CfDef struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeString(struct.keyspace); +- oprot.writeString(struct.name); +- BitSet optionals = new BitSet(); +- if (struct.isSetColumn_type()) { +- optionals.set(0); +- } +- if (struct.isSetComparator_type()) { +- optionals.set(1); +- } +- if (struct.isSetSubcomparator_type()) { +- optionals.set(2); +- } +- if (struct.isSetComment()) { +- optionals.set(3); +- } +- if (struct.isSetRead_repair_chance()) { +- optionals.set(4); +- } +- if (struct.isSetColumn_metadata()) { +- optionals.set(5); +- } +- if (struct.isSetGc_grace_seconds()) { +- optionals.set(6); +- } +- if (struct.isSetDefault_validation_class()) { +- optionals.set(7); +- } +- if (struct.isSetId()) { +- 
optionals.set(8); +- } +- if (struct.isSetMin_compaction_threshold()) { +- optionals.set(9); +- } +- if (struct.isSetMax_compaction_threshold()) { +- optionals.set(10); +- } +- if (struct.isSetKey_validation_class()) { +- optionals.set(11); +- } +- if (struct.isSetKey_alias()) { +- optionals.set(12); +- } +- if (struct.isSetCompaction_strategy()) { +- optionals.set(13); +- } +- if (struct.isSetCompaction_strategy_options()) { +- optionals.set(14); +- } +- if (struct.isSetCompression_options()) { +- optionals.set(15); +- } +- if (struct.isSetBloom_filter_fp_chance()) { +- optionals.set(16); +- } +- if (struct.isSetCaching()) { +- optionals.set(17); +- } +- if (struct.isSetDclocal_read_repair_chance()) { +- optionals.set(18); +- } +- if (struct.isSetMemtable_flush_period_in_ms()) { +- optionals.set(19); +- } +- if (struct.isSetDefault_time_to_live()) { +- optionals.set(20); +- } +- if (struct.isSetSpeculative_retry()) { +- optionals.set(21); +- } +- if (struct.isSetTriggers()) { +- optionals.set(22); +- } +- if (struct.isSetCells_per_row_to_cache()) { +- optionals.set(23); +- } +- if (struct.isSetMin_index_interval()) { +- optionals.set(24); +- } +- if (struct.isSetMax_index_interval()) { +- optionals.set(25); +- } +- if (struct.isSetRow_cache_size()) { +- optionals.set(26); +- } +- if (struct.isSetKey_cache_size()) { +- optionals.set(27); +- } +- if (struct.isSetRow_cache_save_period_in_seconds()) { +- optionals.set(28); +- } +- if (struct.isSetKey_cache_save_period_in_seconds()) { +- optionals.set(29); +- } +- if (struct.isSetMemtable_flush_after_mins()) { +- optionals.set(30); +- } +- if (struct.isSetMemtable_throughput_in_mb()) { +- optionals.set(31); +- } +- if (struct.isSetMemtable_operations_in_millions()) { +- optionals.set(32); +- } +- if (struct.isSetReplicate_on_write()) { +- optionals.set(33); +- } +- if (struct.isSetMerge_shards_chance()) { +- optionals.set(34); +- } +- if (struct.isSetRow_cache_provider()) { +- optionals.set(35); +- } +- if 
(struct.isSetRow_cache_keys_to_save()) { +- optionals.set(36); +- } +- if (struct.isSetPopulate_io_cache_on_flush()) { +- optionals.set(37); +- } +- if (struct.isSetIndex_interval()) { +- optionals.set(38); +- } +- oprot.writeBitSet(optionals, 39); +- if (struct.isSetColumn_type()) { +- oprot.writeString(struct.column_type); +- } +- if (struct.isSetComparator_type()) { +- oprot.writeString(struct.comparator_type); +- } +- if (struct.isSetSubcomparator_type()) { +- oprot.writeString(struct.subcomparator_type); +- } +- if (struct.isSetComment()) { +- oprot.writeString(struct.comment); +- } +- if (struct.isSetRead_repair_chance()) { +- oprot.writeDouble(struct.read_repair_chance); +- } +- if (struct.isSetColumn_metadata()) { +- { +- oprot.writeI32(struct.column_metadata.size()); +- for (ColumnDef _iter128 : struct.column_metadata) +- { +- _iter128.write(oprot); +- } +- } +- } +- if (struct.isSetGc_grace_seconds()) { +- oprot.writeI32(struct.gc_grace_seconds); +- } +- if (struct.isSetDefault_validation_class()) { +- oprot.writeString(struct.default_validation_class); +- } +- if (struct.isSetId()) { +- oprot.writeI32(struct.id); +- } +- if (struct.isSetMin_compaction_threshold()) { +- oprot.writeI32(struct.min_compaction_threshold); +- } +- if (struct.isSetMax_compaction_threshold()) { +- oprot.writeI32(struct.max_compaction_threshold); +- } +- if (struct.isSetKey_validation_class()) { +- oprot.writeString(struct.key_validation_class); +- } +- if (struct.isSetKey_alias()) { +- oprot.writeBinary(struct.key_alias); +- } +- if (struct.isSetCompaction_strategy()) { +- oprot.writeString(struct.compaction_strategy); +- } +- if (struct.isSetCompaction_strategy_options()) { +- { +- oprot.writeI32(struct.compaction_strategy_options.size()); +- for (Map.Entry _iter129 : struct.compaction_strategy_options.entrySet()) +- { +- oprot.writeString(_iter129.getKey()); +- oprot.writeString(_iter129.getValue()); +- } +- } +- } +- if (struct.isSetCompression_options()) { +- { +- 
oprot.writeI32(struct.compression_options.size()); +- for (Map.Entry _iter130 : struct.compression_options.entrySet()) +- { +- oprot.writeString(_iter130.getKey()); +- oprot.writeString(_iter130.getValue()); +- } +- } +- } +- if (struct.isSetBloom_filter_fp_chance()) { +- oprot.writeDouble(struct.bloom_filter_fp_chance); +- } +- if (struct.isSetCaching()) { +- oprot.writeString(struct.caching); +- } +- if (struct.isSetDclocal_read_repair_chance()) { +- oprot.writeDouble(struct.dclocal_read_repair_chance); +- } +- if (struct.isSetMemtable_flush_period_in_ms()) { +- oprot.writeI32(struct.memtable_flush_period_in_ms); +- } +- if (struct.isSetDefault_time_to_live()) { +- oprot.writeI32(struct.default_time_to_live); +- } +- if (struct.isSetSpeculative_retry()) { +- oprot.writeString(struct.speculative_retry); +- } +- if (struct.isSetTriggers()) { +- { +- oprot.writeI32(struct.triggers.size()); +- for (TriggerDef _iter131 : struct.triggers) +- { +- _iter131.write(oprot); +- } +- } +- } +- if (struct.isSetCells_per_row_to_cache()) { +- oprot.writeString(struct.cells_per_row_to_cache); +- } +- if (struct.isSetMin_index_interval()) { +- oprot.writeI32(struct.min_index_interval); +- } +- if (struct.isSetMax_index_interval()) { +- oprot.writeI32(struct.max_index_interval); +- } +- if (struct.isSetRow_cache_size()) { +- oprot.writeDouble(struct.row_cache_size); +- } +- if (struct.isSetKey_cache_size()) { +- oprot.writeDouble(struct.key_cache_size); +- } +- if (struct.isSetRow_cache_save_period_in_seconds()) { +- oprot.writeI32(struct.row_cache_save_period_in_seconds); +- } +- if (struct.isSetKey_cache_save_period_in_seconds()) { +- oprot.writeI32(struct.key_cache_save_period_in_seconds); +- } +- if (struct.isSetMemtable_flush_after_mins()) { +- oprot.writeI32(struct.memtable_flush_after_mins); +- } +- if (struct.isSetMemtable_throughput_in_mb()) { +- oprot.writeI32(struct.memtable_throughput_in_mb); +- } +- if (struct.isSetMemtable_operations_in_millions()) { +- 
oprot.writeDouble(struct.memtable_operations_in_millions); +- } +- if (struct.isSetReplicate_on_write()) { +- oprot.writeBool(struct.replicate_on_write); +- } +- if (struct.isSetMerge_shards_chance()) { +- oprot.writeDouble(struct.merge_shards_chance); +- } +- if (struct.isSetRow_cache_provider()) { +- oprot.writeString(struct.row_cache_provider); +- } +- if (struct.isSetRow_cache_keys_to_save()) { +- oprot.writeI32(struct.row_cache_keys_to_save); +- } +- if (struct.isSetPopulate_io_cache_on_flush()) { +- oprot.writeBool(struct.populate_io_cache_on_flush); +- } +- if (struct.isSetIndex_interval()) { +- oprot.writeI32(struct.index_interval); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, CfDef struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.keyspace = iprot.readString(); +- struct.setKeyspaceIsSet(true); +- struct.name = iprot.readString(); +- struct.setNameIsSet(true); +- BitSet incoming = iprot.readBitSet(39); +- if (incoming.get(0)) { +- struct.column_type = iprot.readString(); +- struct.setColumn_typeIsSet(true); +- } +- if (incoming.get(1)) { +- struct.comparator_type = iprot.readString(); +- struct.setComparator_typeIsSet(true); +- } +- if (incoming.get(2)) { +- struct.subcomparator_type = iprot.readString(); +- struct.setSubcomparator_typeIsSet(true); +- } +- if (incoming.get(3)) { +- struct.comment = iprot.readString(); +- struct.setCommentIsSet(true); +- } +- if (incoming.get(4)) { +- struct.read_repair_chance = iprot.readDouble(); +- struct.setRead_repair_chanceIsSet(true); +- } +- if (incoming.get(5)) { +- { +- org.apache.thrift.protocol.TList _list132 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); +- struct.column_metadata = new ArrayList(_list132.size); +- for (int _i133 = 0; _i133 < _list132.size; ++_i133) +- { +- ColumnDef _elem134; +- _elem134 = new ColumnDef(); +- _elem134.read(iprot); +- 
struct.column_metadata.add(_elem134); +- } +- } +- struct.setColumn_metadataIsSet(true); +- } +- if (incoming.get(6)) { +- struct.gc_grace_seconds = iprot.readI32(); +- struct.setGc_grace_secondsIsSet(true); +- } +- if (incoming.get(7)) { +- struct.default_validation_class = iprot.readString(); +- struct.setDefault_validation_classIsSet(true); +- } +- if (incoming.get(8)) { +- struct.id = iprot.readI32(); +- struct.setIdIsSet(true); +- } +- if (incoming.get(9)) { +- struct.min_compaction_threshold = iprot.readI32(); +- struct.setMin_compaction_thresholdIsSet(true); +- } +- if (incoming.get(10)) { +- struct.max_compaction_threshold = iprot.readI32(); +- struct.setMax_compaction_thresholdIsSet(true); +- } +- if (incoming.get(11)) { +- struct.key_validation_class = iprot.readString(); +- struct.setKey_validation_classIsSet(true); +- } +- if (incoming.get(12)) { +- struct.key_alias = iprot.readBinary(); +- struct.setKey_aliasIsSet(true); +- } +- if (incoming.get(13)) { +- struct.compaction_strategy = iprot.readString(); +- struct.setCompaction_strategyIsSet(true); +- } +- if (incoming.get(14)) { +- { +- org.apache.thrift.protocol.TMap _map135 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); +- struct.compaction_strategy_options = new HashMap(2*_map135.size); +- for (int _i136 = 0; _i136 < _map135.size; ++_i136) +- { +- String _key137; +- String _val138; +- _key137 = iprot.readString(); +- _val138 = iprot.readString(); +- struct.compaction_strategy_options.put(_key137, _val138); +- } +- } +- struct.setCompaction_strategy_optionsIsSet(true); +- } +- if (incoming.get(15)) { +- { +- org.apache.thrift.protocol.TMap _map139 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); +- struct.compression_options = new HashMap(2*_map139.size); +- for (int _i140 = 0; _i140 < _map139.size; ++_i140) +- { +- String 
_key141; +- String _val142; +- _key141 = iprot.readString(); +- _val142 = iprot.readString(); +- struct.compression_options.put(_key141, _val142); +- } +- } +- struct.setCompression_optionsIsSet(true); +- } +- if (incoming.get(16)) { +- struct.bloom_filter_fp_chance = iprot.readDouble(); +- struct.setBloom_filter_fp_chanceIsSet(true); +- } +- if (incoming.get(17)) { +- struct.caching = iprot.readString(); +- struct.setCachingIsSet(true); +- } +- if (incoming.get(18)) { +- struct.dclocal_read_repair_chance = iprot.readDouble(); +- struct.setDclocal_read_repair_chanceIsSet(true); +- } +- if (incoming.get(19)) { +- struct.memtable_flush_period_in_ms = iprot.readI32(); +- struct.setMemtable_flush_period_in_msIsSet(true); +- } +- if (incoming.get(20)) { +- struct.default_time_to_live = iprot.readI32(); +- struct.setDefault_time_to_liveIsSet(true); +- } +- if (incoming.get(21)) { +- struct.speculative_retry = iprot.readString(); +- struct.setSpeculative_retryIsSet(true); +- } +- if (incoming.get(22)) { +- { +- org.apache.thrift.protocol.TList _list143 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); +- struct.triggers = new ArrayList(_list143.size); +- for (int _i144 = 0; _i144 < _list143.size; ++_i144) +- { +- TriggerDef _elem145; +- _elem145 = new TriggerDef(); +- _elem145.read(iprot); +- struct.triggers.add(_elem145); +- } +- } +- struct.setTriggersIsSet(true); +- } +- if (incoming.get(23)) { +- struct.cells_per_row_to_cache = iprot.readString(); +- struct.setCells_per_row_to_cacheIsSet(true); +- } +- if (incoming.get(24)) { +- struct.min_index_interval = iprot.readI32(); +- struct.setMin_index_intervalIsSet(true); +- } +- if (incoming.get(25)) { +- struct.max_index_interval = iprot.readI32(); +- struct.setMax_index_intervalIsSet(true); +- } +- if (incoming.get(26)) { +- struct.row_cache_size = iprot.readDouble(); +- struct.setRow_cache_sizeIsSet(true); +- } +- if (incoming.get(27)) { +- struct.key_cache_size = 
iprot.readDouble(); +- struct.setKey_cache_sizeIsSet(true); +- } +- if (incoming.get(28)) { +- struct.row_cache_save_period_in_seconds = iprot.readI32(); +- struct.setRow_cache_save_period_in_secondsIsSet(true); +- } +- if (incoming.get(29)) { +- struct.key_cache_save_period_in_seconds = iprot.readI32(); +- struct.setKey_cache_save_period_in_secondsIsSet(true); +- } +- if (incoming.get(30)) { +- struct.memtable_flush_after_mins = iprot.readI32(); +- struct.setMemtable_flush_after_minsIsSet(true); +- } +- if (incoming.get(31)) { +- struct.memtable_throughput_in_mb = iprot.readI32(); +- struct.setMemtable_throughput_in_mbIsSet(true); +- } +- if (incoming.get(32)) { +- struct.memtable_operations_in_millions = iprot.readDouble(); +- struct.setMemtable_operations_in_millionsIsSet(true); +- } +- if (incoming.get(33)) { +- struct.replicate_on_write = iprot.readBool(); +- struct.setReplicate_on_writeIsSet(true); +- } +- if (incoming.get(34)) { +- struct.merge_shards_chance = iprot.readDouble(); +- struct.setMerge_shards_chanceIsSet(true); +- } +- if (incoming.get(35)) { +- struct.row_cache_provider = iprot.readString(); +- struct.setRow_cache_providerIsSet(true); +- } +- if (incoming.get(36)) { +- struct.row_cache_keys_to_save = iprot.readI32(); +- struct.setRow_cache_keys_to_saveIsSet(true); +- } +- if (incoming.get(37)) { +- struct.populate_io_cache_on_flush = iprot.readBool(); +- struct.setPopulate_io_cache_on_flushIsSet(true); +- } +- if (incoming.get(38)) { +- struct.index_interval = iprot.readI32(); +- struct.setIndex_intervalIsSet(true); +- } +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/CfSplit.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/CfSplit.java +deleted file mode 100644 +index f765b87..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/CfSplit.java ++++ /dev/null +@@ -1,614 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU 
KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. +- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * Represents input splits used by hadoop ColumnFamilyRecordReaders +- */ +-public class CfSplit 
implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CfSplit"); +- +- private static final org.apache.thrift.protocol.TField START_TOKEN_FIELD_DESC = new org.apache.thrift.protocol.TField("start_token", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField END_TOKEN_FIELD_DESC = new org.apache.thrift.protocol.TField("end_token", org.apache.thrift.protocol.TType.STRING, (short)2); +- private static final org.apache.thrift.protocol.TField ROW_COUNT_FIELD_DESC = new org.apache.thrift.protocol.TField("row_count", org.apache.thrift.protocol.TType.I64, (short)3); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new CfSplitStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new CfSplitTupleSchemeFactory()); +- } +- +- public String start_token; // required +- public String end_token; // required +- public long row_count; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- START_TOKEN((short)1, "start_token"), +- END_TOKEN((short)2, "end_token"), +- ROW_COUNT((short)3, "row_count"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. 
+- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // START_TOKEN +- return START_TOKEN; +- case 2: // END_TOKEN +- return END_TOKEN; +- case 3: // ROW_COUNT +- return ROW_COUNT; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private static final int __ROW_COUNT_ISSET_ID = 0; +- private byte __isset_bitfield = 0; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.START_TOKEN, new org.apache.thrift.meta_data.FieldMetaData("start_token", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.END_TOKEN, new org.apache.thrift.meta_data.FieldMetaData("end_token", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.ROW_COUNT, new 
org.apache.thrift.meta_data.FieldMetaData("row_count", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CfSplit.class, metaDataMap); +- } +- +- public CfSplit() { +- } +- +- public CfSplit( +- String start_token, +- String end_token, +- long row_count) +- { +- this(); +- this.start_token = start_token; +- this.end_token = end_token; +- this.row_count = row_count; +- setRow_countIsSet(true); +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public CfSplit(CfSplit other) { +- __isset_bitfield = other.__isset_bitfield; +- if (other.isSetStart_token()) { +- this.start_token = other.start_token; +- } +- if (other.isSetEnd_token()) { +- this.end_token = other.end_token; +- } +- this.row_count = other.row_count; +- } +- +- public CfSplit deepCopy() { +- return new CfSplit(this); +- } +- +- @Override +- public void clear() { +- this.start_token = null; +- this.end_token = null; +- setRow_countIsSet(false); +- this.row_count = 0; +- } +- +- public String getStart_token() { +- return this.start_token; +- } +- +- public CfSplit setStart_token(String start_token) { +- this.start_token = start_token; +- return this; +- } +- +- public void unsetStart_token() { +- this.start_token = null; +- } +- +- /** Returns true if field start_token is set (has been assigned a value) and false otherwise */ +- public boolean isSetStart_token() { +- return this.start_token != null; +- } +- +- public void setStart_tokenIsSet(boolean value) { +- if (!value) { +- this.start_token = null; +- } +- } +- +- public String getEnd_token() { +- return this.end_token; +- } +- +- public CfSplit setEnd_token(String end_token) { +- this.end_token = end_token; +- return this; +- } +- +- public void unsetEnd_token() { +- this.end_token = null; +- } +- +- /** Returns true if field end_token 
is set (has been assigned a value) and false otherwise */ +- public boolean isSetEnd_token() { +- return this.end_token != null; +- } +- +- public void setEnd_tokenIsSet(boolean value) { +- if (!value) { +- this.end_token = null; +- } +- } +- +- public long getRow_count() { +- return this.row_count; +- } +- +- public CfSplit setRow_count(long row_count) { +- this.row_count = row_count; +- setRow_countIsSet(true); +- return this; +- } +- +- public void unsetRow_count() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ROW_COUNT_ISSET_ID); +- } +- +- /** Returns true if field row_count is set (has been assigned a value) and false otherwise */ +- public boolean isSetRow_count() { +- return EncodingUtils.testBit(__isset_bitfield, __ROW_COUNT_ISSET_ID); +- } +- +- public void setRow_countIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ROW_COUNT_ISSET_ID, value); +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case START_TOKEN: +- if (value == null) { +- unsetStart_token(); +- } else { +- setStart_token((String)value); +- } +- break; +- +- case END_TOKEN: +- if (value == null) { +- unsetEnd_token(); +- } else { +- setEnd_token((String)value); +- } +- break; +- +- case ROW_COUNT: +- if (value == null) { +- unsetRow_count(); +- } else { +- setRow_count((Long)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case START_TOKEN: +- return getStart_token(); +- +- case END_TOKEN: +- return getEnd_token(); +- +- case ROW_COUNT: +- return Long.valueOf(getRow_count()); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case START_TOKEN: +- return isSetStart_token(); +- case 
END_TOKEN: +- return isSetEnd_token(); +- case ROW_COUNT: +- return isSetRow_count(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof CfSplit) +- return this.equals((CfSplit)that); +- return false; +- } +- +- public boolean equals(CfSplit that) { +- if (that == null) +- return false; +- +- boolean this_present_start_token = true && this.isSetStart_token(); +- boolean that_present_start_token = true && that.isSetStart_token(); +- if (this_present_start_token || that_present_start_token) { +- if (!(this_present_start_token && that_present_start_token)) +- return false; +- if (!this.start_token.equals(that.start_token)) +- return false; +- } +- +- boolean this_present_end_token = true && this.isSetEnd_token(); +- boolean that_present_end_token = true && that.isSetEnd_token(); +- if (this_present_end_token || that_present_end_token) { +- if (!(this_present_end_token && that_present_end_token)) +- return false; +- if (!this.end_token.equals(that.end_token)) +- return false; +- } +- +- boolean this_present_row_count = true; +- boolean that_present_row_count = true; +- if (this_present_row_count || that_present_row_count) { +- if (!(this_present_row_count && that_present_row_count)) +- return false; +- if (this.row_count != that.row_count) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_start_token = true && (isSetStart_token()); +- builder.append(present_start_token); +- if (present_start_token) +- builder.append(start_token); +- +- boolean present_end_token = true && (isSetEnd_token()); +- builder.append(present_end_token); +- if (present_end_token) +- builder.append(end_token); +- +- boolean present_row_count = true; +- builder.append(present_row_count); +- if (present_row_count) +- builder.append(row_count); +- +- return 
builder.toHashCode(); +- } +- +- @Override +- public int compareTo(CfSplit other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetStart_token()).compareTo(other.isSetStart_token()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetStart_token()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.start_token, other.start_token); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetEnd_token()).compareTo(other.isSetEnd_token()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetEnd_token()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.end_token, other.end_token); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetRow_count()).compareTo(other.isSetRow_count()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetRow_count()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.row_count, other.row_count); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("CfSplit("); +- boolean first = true; +- +- sb.append("start_token:"); +- if (this.start_token == null) { +- sb.append("null"); +- } else { +- sb.append(this.start_token); +- } +- first = 
false; +- if (!first) sb.append(", "); +- sb.append("end_token:"); +- if (this.end_token == null) { +- sb.append("null"); +- } else { +- sb.append(this.end_token); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("row_count:"); +- sb.append(this.row_count); +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (start_token == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'start_token' was not present! Struct: " + toString()); +- } +- if (end_token == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'end_token' was not present! Struct: " + toString()); +- } +- // alas, we cannot check 'row_count' because it's a primitive and you chose the non-beans generator. +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+- __isset_bitfield = 0; +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class CfSplitStandardSchemeFactory implements SchemeFactory { +- public CfSplitStandardScheme getScheme() { +- return new CfSplitStandardScheme(); +- } +- } +- +- private static class CfSplitStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, CfSplit struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // START_TOKEN +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.start_token = iprot.readString(); +- struct.setStart_tokenIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // END_TOKEN +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.end_token = iprot.readString(); +- struct.setEnd_tokenIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // ROW_COUNT +- if (schemeField.type == org.apache.thrift.protocol.TType.I64) { +- struct.row_count = iprot.readI64(); +- struct.setRow_countIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- if (!struct.isSetRow_count()) { +- throw new 
org.apache.thrift.protocol.TProtocolException("Required field 'row_count' was not found in serialized data! Struct: " + toString()); +- } +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, CfSplit struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.start_token != null) { +- oprot.writeFieldBegin(START_TOKEN_FIELD_DESC); +- oprot.writeString(struct.start_token); +- oprot.writeFieldEnd(); +- } +- if (struct.end_token != null) { +- oprot.writeFieldBegin(END_TOKEN_FIELD_DESC); +- oprot.writeString(struct.end_token); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldBegin(ROW_COUNT_FIELD_DESC); +- oprot.writeI64(struct.row_count); +- oprot.writeFieldEnd(); +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class CfSplitTupleSchemeFactory implements SchemeFactory { +- public CfSplitTupleScheme getScheme() { +- return new CfSplitTupleScheme(); +- } +- } +- +- private static class CfSplitTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, CfSplit struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeString(struct.start_token); +- oprot.writeString(struct.end_token); +- oprot.writeI64(struct.row_count); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, CfSplit struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.start_token = iprot.readString(); +- struct.setStart_tokenIsSet(true); +- struct.end_token = iprot.readString(); +- struct.setEnd_tokenIsSet(true); +- struct.row_count = iprot.readI64(); +- struct.setRow_countIsSet(true); +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/Column.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/Column.java +deleted file mode 100644 
+index b516a38..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/Column.java ++++ /dev/null +@@ -1,754 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. 
+- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * Basic unit of data within a ColumnFamily. +- * @param name, the name by which this column is set and retrieved. Maximum 64KB long. +- * @param value. The data associated with the name. Maximum 2GB long, but in practice you should limit it to small numbers of MB (since Thrift must read the full value into memory to operate on it). +- * @param timestamp. The timestamp is used for conflict detection/resolution when two columns with same name need to be compared. +- * @param ttl. An optional, positive delay (in seconds) after which the column will be automatically deleted. 
+- */ +-public class Column implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Column"); +- +- private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.STRING, (short)2); +- private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short)3); +- private static final org.apache.thrift.protocol.TField TTL_FIELD_DESC = new org.apache.thrift.protocol.TField("ttl", org.apache.thrift.protocol.TType.I32, (short)4); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new ColumnStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new ColumnTupleSchemeFactory()); +- } +- +- public ByteBuffer name; // required +- public ByteBuffer value; // optional +- public long timestamp; // optional +- public int ttl; // optional +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- NAME((short)1, "name"), +- VALUE((short)2, "value"), +- TIMESTAMP((short)3, "timestamp"), +- TTL((short)4, "ttl"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. 
+- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // NAME +- return NAME; +- case 2: // VALUE +- return VALUE; +- case 3: // TIMESTAMP +- return TIMESTAMP; +- case 4: // TTL +- return TTL; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private static final int __TIMESTAMP_ISSET_ID = 0; +- private static final int __TTL_ISSET_ID = 1; +- private byte __isset_bitfield = 0; +- private _Fields optionals[] = {_Fields.VALUE,_Fields.TIMESTAMP,_Fields.TTL}; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); +- tmpMap.put(_Fields.TTL, new org.apache.thrift.meta_data.FieldMetaData("ttl", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Column.class, metaDataMap); +- } +- +- public Column() { +- } +- +- public Column( +- ByteBuffer name) +- { +- this(); +- this.name = name; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public Column(Column other) { +- __isset_bitfield = other.__isset_bitfield; +- if (other.isSetName()) { +- this.name = org.apache.thrift.TBaseHelper.copyBinary(other.name); +-; +- } +- if (other.isSetValue()) { +- this.value = org.apache.thrift.TBaseHelper.copyBinary(other.value); +-; +- } +- this.timestamp = other.timestamp; +- this.ttl = other.ttl; +- } +- +- public Column deepCopy() { +- return new Column(this); +- } +- +- @Override +- public void clear() { +- this.name = null; +- this.value = null; +- setTimestampIsSet(false); +- this.timestamp = 0; +- setTtlIsSet(false); +- this.ttl = 0; +- } +- +- public byte[] getName() { +- setName(org.apache.thrift.TBaseHelper.rightSize(name)); +- return name == null ? null : name.array(); +- } +- +- public ByteBuffer bufferForName() { +- return name; +- } +- +- public Column setName(byte[] name) { +- setName(name == null ? 
(ByteBuffer)null : ByteBuffer.wrap(name)); +- return this; +- } +- +- public Column setName(ByteBuffer name) { +- this.name = name; +- return this; +- } +- +- public void unsetName() { +- this.name = null; +- } +- +- /** Returns true if field name is set (has been assigned a value) and false otherwise */ +- public boolean isSetName() { +- return this.name != null; +- } +- +- public void setNameIsSet(boolean value) { +- if (!value) { +- this.name = null; +- } +- } +- +- public byte[] getValue() { +- setValue(org.apache.thrift.TBaseHelper.rightSize(value)); +- return value == null ? null : value.array(); +- } +- +- public ByteBuffer bufferForValue() { +- return value; +- } +- +- public Column setValue(byte[] value) { +- setValue(value == null ? (ByteBuffer)null : ByteBuffer.wrap(value)); +- return this; +- } +- +- public Column setValue(ByteBuffer value) { +- this.value = value; +- return this; +- } +- +- public void unsetValue() { +- this.value = null; +- } +- +- /** Returns true if field value is set (has been assigned a value) and false otherwise */ +- public boolean isSetValue() { +- return this.value != null; +- } +- +- public void setValueIsSet(boolean value) { +- if (!value) { +- this.value = null; +- } +- } +- +- public long getTimestamp() { +- return this.timestamp; +- } +- +- public Column setTimestamp(long timestamp) { +- this.timestamp = timestamp; +- setTimestampIsSet(true); +- return this; +- } +- +- public void unsetTimestamp() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID); +- } +- +- /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */ +- public boolean isSetTimestamp() { +- return EncodingUtils.testBit(__isset_bitfield, __TIMESTAMP_ISSET_ID); +- } +- +- public void setTimestampIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value); +- } +- +- public int getTtl() { +- return this.ttl; +- } +- +- public Column 
setTtl(int ttl) { +- this.ttl = ttl; +- setTtlIsSet(true); +- return this; +- } +- +- public void unsetTtl() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TTL_ISSET_ID); +- } +- +- /** Returns true if field ttl is set (has been assigned a value) and false otherwise */ +- public boolean isSetTtl() { +- return EncodingUtils.testBit(__isset_bitfield, __TTL_ISSET_ID); +- } +- +- public void setTtlIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TTL_ISSET_ID, value); +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case NAME: +- if (value == null) { +- unsetName(); +- } else { +- setName((ByteBuffer)value); +- } +- break; +- +- case VALUE: +- if (value == null) { +- unsetValue(); +- } else { +- setValue((ByteBuffer)value); +- } +- break; +- +- case TIMESTAMP: +- if (value == null) { +- unsetTimestamp(); +- } else { +- setTimestamp((Long)value); +- } +- break; +- +- case TTL: +- if (value == null) { +- unsetTtl(); +- } else { +- setTtl((Integer)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case NAME: +- return getName(); +- +- case VALUE: +- return getValue(); +- +- case TIMESTAMP: +- return Long.valueOf(getTimestamp()); +- +- case TTL: +- return Integer.valueOf(getTtl()); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case NAME: +- return isSetName(); +- case VALUE: +- return isSetValue(); +- case TIMESTAMP: +- return isSetTimestamp(); +- case TTL: +- return isSetTtl(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof Column) +- return 
this.equals((Column)that); +- return false; +- } +- +- public boolean equals(Column that) { +- if (that == null) +- return false; +- +- boolean this_present_name = true && this.isSetName(); +- boolean that_present_name = true && that.isSetName(); +- if (this_present_name || that_present_name) { +- if (!(this_present_name && that_present_name)) +- return false; +- if (!this.name.equals(that.name)) +- return false; +- } +- +- boolean this_present_value = true && this.isSetValue(); +- boolean that_present_value = true && that.isSetValue(); +- if (this_present_value || that_present_value) { +- if (!(this_present_value && that_present_value)) +- return false; +- if (!this.value.equals(that.value)) +- return false; +- } +- +- boolean this_present_timestamp = true && this.isSetTimestamp(); +- boolean that_present_timestamp = true && that.isSetTimestamp(); +- if (this_present_timestamp || that_present_timestamp) { +- if (!(this_present_timestamp && that_present_timestamp)) +- return false; +- if (this.timestamp != that.timestamp) +- return false; +- } +- +- boolean this_present_ttl = true && this.isSetTtl(); +- boolean that_present_ttl = true && that.isSetTtl(); +- if (this_present_ttl || that_present_ttl) { +- if (!(this_present_ttl && that_present_ttl)) +- return false; +- if (this.ttl != that.ttl) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_name = true && (isSetName()); +- builder.append(present_name); +- if (present_name) +- builder.append(name); +- +- boolean present_value = true && (isSetValue()); +- builder.append(present_value); +- if (present_value) +- builder.append(value); +- +- boolean present_timestamp = true && (isSetTimestamp()); +- builder.append(present_timestamp); +- if (present_timestamp) +- builder.append(timestamp); +- +- boolean present_ttl = true && (isSetTtl()); +- builder.append(present_ttl); +- if (present_ttl) +- 
builder.append(ttl); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(Column other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetName()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetValue()).compareTo(other.isSetValue()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetValue()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.value, other.value); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetTimestamp()).compareTo(other.isSetTimestamp()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetTimestamp()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.timestamp, other.timestamp); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetTtl()).compareTo(other.isSetTtl()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetTtl()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ttl, other.ttl); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, 
this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("Column("); +- boolean first = true; +- +- sb.append("name:"); +- if (this.name == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.name, sb); +- } +- first = false; +- if (isSetValue()) { +- if (!first) sb.append(", "); +- sb.append("value:"); +- if (this.value == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.value, sb); +- } +- first = false; +- } +- if (isSetTimestamp()) { +- if (!first) sb.append(", "); +- sb.append("timestamp:"); +- sb.append(this.timestamp); +- first = false; +- } +- if (isSetTtl()) { +- if (!first) sb.append(", "); +- sb.append("ttl:"); +- sb.append(this.ttl); +- first = false; +- } +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (name == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'name' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+- __isset_bitfield = 0; +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class ColumnStandardSchemeFactory implements SchemeFactory { +- public ColumnStandardScheme getScheme() { +- return new ColumnStandardScheme(); +- } +- } +- +- private static class ColumnStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, Column struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // NAME +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.name = iprot.readBinary(); +- struct.setNameIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // VALUE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.value = iprot.readBinary(); +- struct.setValueIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // TIMESTAMP +- if (schemeField.type == org.apache.thrift.protocol.TType.I64) { +- struct.timestamp = iprot.readI64(); +- struct.setTimestampIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // TTL +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.ttl = iprot.readI32(); +- struct.setTtlIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- 
iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, Column struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.name != null) { +- oprot.writeFieldBegin(NAME_FIELD_DESC); +- oprot.writeBinary(struct.name); +- oprot.writeFieldEnd(); +- } +- if (struct.value != null) { +- if (struct.isSetValue()) { +- oprot.writeFieldBegin(VALUE_FIELD_DESC); +- oprot.writeBinary(struct.value); +- oprot.writeFieldEnd(); +- } +- } +- if (struct.isSetTimestamp()) { +- oprot.writeFieldBegin(TIMESTAMP_FIELD_DESC); +- oprot.writeI64(struct.timestamp); +- oprot.writeFieldEnd(); +- } +- if (struct.isSetTtl()) { +- oprot.writeFieldBegin(TTL_FIELD_DESC); +- oprot.writeI32(struct.ttl); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class ColumnTupleSchemeFactory implements SchemeFactory { +- public ColumnTupleScheme getScheme() { +- return new ColumnTupleScheme(); +- } +- } +- +- private static class ColumnTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, Column struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeBinary(struct.name); +- BitSet optionals = new BitSet(); +- if (struct.isSetValue()) { +- optionals.set(0); +- } +- if (struct.isSetTimestamp()) { +- optionals.set(1); +- } +- if (struct.isSetTtl()) { +- optionals.set(2); +- } +- oprot.writeBitSet(optionals, 3); +- if (struct.isSetValue()) { +- oprot.writeBinary(struct.value); +- } +- if (struct.isSetTimestamp()) { +- oprot.writeI64(struct.timestamp); +- } +- if (struct.isSetTtl()) { +- oprot.writeI32(struct.ttl); +- } +- } +- +- @Override +- public void 
read(org.apache.thrift.protocol.TProtocol prot, Column struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.name = iprot.readBinary(); +- struct.setNameIsSet(true); +- BitSet incoming = iprot.readBitSet(3); +- if (incoming.get(0)) { +- struct.value = iprot.readBinary(); +- struct.setValueIsSet(true); +- } +- if (incoming.get(1)) { +- struct.timestamp = iprot.readI64(); +- struct.setTimestampIsSet(true); +- } +- if (incoming.get(2)) { +- struct.ttl = iprot.readI32(); +- struct.setTtlIsSet(true); +- } +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/ColumnDef.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/ColumnDef.java +deleted file mode 100644 +index 951c967..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/ColumnDef.java ++++ /dev/null +@@ -1,915 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. 
+- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-public class ColumnDef implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ColumnDef"); +- +- private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField VALIDATION_CLASS_FIELD_DESC = new org.apache.thrift.protocol.TField("validation_class", org.apache.thrift.protocol.TType.STRING, (short)2); +- private static final org.apache.thrift.protocol.TField INDEX_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("index_type", org.apache.thrift.protocol.TType.I32, (short)3); +- private static final org.apache.thrift.protocol.TField INDEX_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("index_name", org.apache.thrift.protocol.TType.STRING, (short)4); +- private static final 
org.apache.thrift.protocol.TField INDEX_OPTIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("index_options", org.apache.thrift.protocol.TType.MAP, (short)5); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new ColumnDefStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new ColumnDefTupleSchemeFactory()); +- } +- +- public ByteBuffer name; // required +- public String validation_class; // required +- /** +- * +- * @see IndexType +- */ +- public IndexType index_type; // optional +- public String index_name; // optional +- public Map index_options; // optional +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- NAME((short)1, "name"), +- VALIDATION_CLASS((short)2, "validation_class"), +- /** +- * +- * @see IndexType +- */ +- INDEX_TYPE((short)3, "index_type"), +- INDEX_NAME((short)4, "index_name"), +- INDEX_OPTIONS((short)5, "index_options"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // NAME +- return NAME; +- case 2: // VALIDATION_CLASS +- return VALIDATION_CLASS; +- case 3: // INDEX_TYPE +- return INDEX_TYPE; +- case 4: // INDEX_NAME +- return INDEX_NAME; +- case 5: // INDEX_OPTIONS +- return INDEX_OPTIONS; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. 
+- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private _Fields optionals[] = {_Fields.INDEX_TYPE,_Fields.INDEX_NAME,_Fields.INDEX_OPTIONS}; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.VALIDATION_CLASS, new org.apache.thrift.meta_data.FieldMetaData("validation_class", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.INDEX_TYPE, new org.apache.thrift.meta_data.FieldMetaData("index_type", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IndexType.class))); +- tmpMap.put(_Fields.INDEX_NAME, new org.apache.thrift.meta_data.FieldMetaData("index_name", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.INDEX_OPTIONS, new org.apache.thrift.meta_data.FieldMetaData("index_options", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ColumnDef.class, metaDataMap); +- } +- +- public ColumnDef() { +- } +- +- public ColumnDef( +- ByteBuffer name, +- String validation_class) +- { +- this(); +- this.name = name; +- this.validation_class = validation_class; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public ColumnDef(ColumnDef other) { +- if (other.isSetName()) { +- this.name = org.apache.thrift.TBaseHelper.copyBinary(other.name); +-; +- } +- if (other.isSetValidation_class()) { +- this.validation_class = other.validation_class; +- } +- if (other.isSetIndex_type()) { +- this.index_type = other.index_type; +- } +- if (other.isSetIndex_name()) { +- this.index_name = other.index_name; +- } +- if (other.isSetIndex_options()) { +- Map __this__index_options = new HashMap(other.index_options); +- this.index_options = __this__index_options; +- } +- } +- +- public ColumnDef deepCopy() { +- return new ColumnDef(this); +- } +- +- @Override +- public void clear() { +- this.name = null; +- this.validation_class = null; +- this.index_type = null; +- this.index_name = null; +- this.index_options = null; +- } +- +- public byte[] getName() { +- setName(org.apache.thrift.TBaseHelper.rightSize(name)); +- return name == null ? 
null : name.array(); +- } +- +- public ByteBuffer bufferForName() { +- return name; +- } +- +- public ColumnDef setName(byte[] name) { +- setName(name == null ? (ByteBuffer)null : ByteBuffer.wrap(name)); +- return this; +- } +- +- public ColumnDef setName(ByteBuffer name) { +- this.name = name; +- return this; +- } +- +- public void unsetName() { +- this.name = null; +- } +- +- /** Returns true if field name is set (has been assigned a value) and false otherwise */ +- public boolean isSetName() { +- return this.name != null; +- } +- +- public void setNameIsSet(boolean value) { +- if (!value) { +- this.name = null; +- } +- } +- +- public String getValidation_class() { +- return this.validation_class; +- } +- +- public ColumnDef setValidation_class(String validation_class) { +- this.validation_class = validation_class; +- return this; +- } +- +- public void unsetValidation_class() { +- this.validation_class = null; +- } +- +- /** Returns true if field validation_class is set (has been assigned a value) and false otherwise */ +- public boolean isSetValidation_class() { +- return this.validation_class != null; +- } +- +- public void setValidation_classIsSet(boolean value) { +- if (!value) { +- this.validation_class = null; +- } +- } +- +- /** +- * +- * @see IndexType +- */ +- public IndexType getIndex_type() { +- return this.index_type; +- } +- +- /** +- * +- * @see IndexType +- */ +- public ColumnDef setIndex_type(IndexType index_type) { +- this.index_type = index_type; +- return this; +- } +- +- public void unsetIndex_type() { +- this.index_type = null; +- } +- +- /** Returns true if field index_type is set (has been assigned a value) and false otherwise */ +- public boolean isSetIndex_type() { +- return this.index_type != null; +- } +- +- public void setIndex_typeIsSet(boolean value) { +- if (!value) { +- this.index_type = null; +- } +- } +- +- public String getIndex_name() { +- return this.index_name; +- } +- +- public ColumnDef setIndex_name(String index_name) { 
+- this.index_name = index_name; +- return this; +- } +- +- public void unsetIndex_name() { +- this.index_name = null; +- } +- +- /** Returns true if field index_name is set (has been assigned a value) and false otherwise */ +- public boolean isSetIndex_name() { +- return this.index_name != null; +- } +- +- public void setIndex_nameIsSet(boolean value) { +- if (!value) { +- this.index_name = null; +- } +- } +- +- public int getIndex_optionsSize() { +- return (this.index_options == null) ? 0 : this.index_options.size(); +- } +- +- public void putToIndex_options(String key, String val) { +- if (this.index_options == null) { +- this.index_options = new HashMap(); +- } +- this.index_options.put(key, val); +- } +- +- public Map getIndex_options() { +- return this.index_options; +- } +- +- public ColumnDef setIndex_options(Map index_options) { +- this.index_options = index_options; +- return this; +- } +- +- public void unsetIndex_options() { +- this.index_options = null; +- } +- +- /** Returns true if field index_options is set (has been assigned a value) and false otherwise */ +- public boolean isSetIndex_options() { +- return this.index_options != null; +- } +- +- public void setIndex_optionsIsSet(boolean value) { +- if (!value) { +- this.index_options = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case NAME: +- if (value == null) { +- unsetName(); +- } else { +- setName((ByteBuffer)value); +- } +- break; +- +- case VALIDATION_CLASS: +- if (value == null) { +- unsetValidation_class(); +- } else { +- setValidation_class((String)value); +- } +- break; +- +- case INDEX_TYPE: +- if (value == null) { +- unsetIndex_type(); +- } else { +- setIndex_type((IndexType)value); +- } +- break; +- +- case INDEX_NAME: +- if (value == null) { +- unsetIndex_name(); +- } else { +- setIndex_name((String)value); +- } +- break; +- +- case INDEX_OPTIONS: +- if (value == null) { +- unsetIndex_options(); +- } else { +- 
setIndex_options((Map)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case NAME: +- return getName(); +- +- case VALIDATION_CLASS: +- return getValidation_class(); +- +- case INDEX_TYPE: +- return getIndex_type(); +- +- case INDEX_NAME: +- return getIndex_name(); +- +- case INDEX_OPTIONS: +- return getIndex_options(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case NAME: +- return isSetName(); +- case VALIDATION_CLASS: +- return isSetValidation_class(); +- case INDEX_TYPE: +- return isSetIndex_type(); +- case INDEX_NAME: +- return isSetIndex_name(); +- case INDEX_OPTIONS: +- return isSetIndex_options(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof ColumnDef) +- return this.equals((ColumnDef)that); +- return false; +- } +- +- public boolean equals(ColumnDef that) { +- if (that == null) +- return false; +- +- boolean this_present_name = true && this.isSetName(); +- boolean that_present_name = true && that.isSetName(); +- if (this_present_name || that_present_name) { +- if (!(this_present_name && that_present_name)) +- return false; +- if (!this.name.equals(that.name)) +- return false; +- } +- +- boolean this_present_validation_class = true && this.isSetValidation_class(); +- boolean that_present_validation_class = true && that.isSetValidation_class(); +- if (this_present_validation_class || that_present_validation_class) { +- if (!(this_present_validation_class && that_present_validation_class)) +- return false; +- if (!this.validation_class.equals(that.validation_class)) +- return false; +- } +- +- boolean this_present_index_type = 
true && this.isSetIndex_type(); +- boolean that_present_index_type = true && that.isSetIndex_type(); +- if (this_present_index_type || that_present_index_type) { +- if (!(this_present_index_type && that_present_index_type)) +- return false; +- if (!this.index_type.equals(that.index_type)) +- return false; +- } +- +- boolean this_present_index_name = true && this.isSetIndex_name(); +- boolean that_present_index_name = true && that.isSetIndex_name(); +- if (this_present_index_name || that_present_index_name) { +- if (!(this_present_index_name && that_present_index_name)) +- return false; +- if (!this.index_name.equals(that.index_name)) +- return false; +- } +- +- boolean this_present_index_options = true && this.isSetIndex_options(); +- boolean that_present_index_options = true && that.isSetIndex_options(); +- if (this_present_index_options || that_present_index_options) { +- if (!(this_present_index_options && that_present_index_options)) +- return false; +- if (!this.index_options.equals(that.index_options)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_name = true && (isSetName()); +- builder.append(present_name); +- if (present_name) +- builder.append(name); +- +- boolean present_validation_class = true && (isSetValidation_class()); +- builder.append(present_validation_class); +- if (present_validation_class) +- builder.append(validation_class); +- +- boolean present_index_type = true && (isSetIndex_type()); +- builder.append(present_index_type); +- if (present_index_type) +- builder.append(index_type.getValue()); +- +- boolean present_index_name = true && (isSetIndex_name()); +- builder.append(present_index_name); +- if (present_index_name) +- builder.append(index_name); +- +- boolean present_index_options = true && (isSetIndex_options()); +- builder.append(present_index_options); +- if (present_index_options) +- builder.append(index_options); +- 
+- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(ColumnDef other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetName()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetValidation_class()).compareTo(other.isSetValidation_class()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetValidation_class()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validation_class, other.validation_class); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIndex_type()).compareTo(other.isSetIndex_type()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIndex_type()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.index_type, other.index_type); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIndex_name()).compareTo(other.isSetIndex_name()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIndex_name()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.index_name, other.index_name); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetIndex_options()).compareTo(other.isSetIndex_options()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetIndex_options()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.index_options, other.index_options); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- 
public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("ColumnDef("); +- boolean first = true; +- +- sb.append("name:"); +- if (this.name == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.name, sb); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("validation_class:"); +- if (this.validation_class == null) { +- sb.append("null"); +- } else { +- sb.append(this.validation_class); +- } +- first = false; +- if (isSetIndex_type()) { +- if (!first) sb.append(", "); +- sb.append("index_type:"); +- if (this.index_type == null) { +- sb.append("null"); +- } else { +- sb.append(this.index_type); +- } +- first = false; +- } +- if (isSetIndex_name()) { +- if (!first) sb.append(", "); +- sb.append("index_name:"); +- if (this.index_name == null) { +- sb.append("null"); +- } else { +- sb.append(this.index_name); +- } +- first = false; +- } +- if (isSetIndex_options()) { +- if (!first) sb.append(", "); +- sb.append("index_options:"); +- if (this.index_options == null) { +- sb.append("null"); +- } else { +- sb.append(this.index_options); +- } +- first = false; +- } +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (name == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'name' was not present! 
Struct: " + toString()); +- } +- if (validation_class == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'validation_class' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class ColumnDefStandardSchemeFactory implements SchemeFactory { +- public ColumnDefStandardScheme getScheme() { +- return new ColumnDefStandardScheme(); +- } +- } +- +- private static class ColumnDefStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnDef struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // NAME +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.name = iprot.readBinary(); +- struct.setNameIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // VALIDATION_CLASS +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.validation_class = iprot.readString(); +- struct.setValidation_classIsSet(true); +- } else { +- 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // INDEX_TYPE +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.index_type = IndexType.findByValue(iprot.readI32()); +- struct.setIndex_typeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // INDEX_NAME +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.index_name = iprot.readString(); +- struct.setIndex_nameIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 5: // INDEX_OPTIONS +- if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { +- { +- org.apache.thrift.protocol.TMap _map90 = iprot.readMapBegin(); +- struct.index_options = new HashMap(2*_map90.size); +- for (int _i91 = 0; _i91 < _map90.size; ++_i91) +- { +- String _key92; +- String _val93; +- _key92 = iprot.readString(); +- _val93 = iprot.readString(); +- struct.index_options.put(_key92, _val93); +- } +- iprot.readMapEnd(); +- } +- struct.setIndex_optionsIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, ColumnDef struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.name != null) { +- oprot.writeFieldBegin(NAME_FIELD_DESC); +- oprot.writeBinary(struct.name); +- oprot.writeFieldEnd(); +- } +- if (struct.validation_class != null) { +- oprot.writeFieldBegin(VALIDATION_CLASS_FIELD_DESC); +- oprot.writeString(struct.validation_class); +- 
oprot.writeFieldEnd(); +- } +- if (struct.index_type != null) { +- if (struct.isSetIndex_type()) { +- oprot.writeFieldBegin(INDEX_TYPE_FIELD_DESC); +- oprot.writeI32(struct.index_type.getValue()); +- oprot.writeFieldEnd(); +- } +- } +- if (struct.index_name != null) { +- if (struct.isSetIndex_name()) { +- oprot.writeFieldBegin(INDEX_NAME_FIELD_DESC); +- oprot.writeString(struct.index_name); +- oprot.writeFieldEnd(); +- } +- } +- if (struct.index_options != null) { +- if (struct.isSetIndex_options()) { +- oprot.writeFieldBegin(INDEX_OPTIONS_FIELD_DESC); +- { +- oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.index_options.size())); +- for (Map.Entry _iter94 : struct.index_options.entrySet()) +- { +- oprot.writeString(_iter94.getKey()); +- oprot.writeString(_iter94.getValue()); +- } +- oprot.writeMapEnd(); +- } +- oprot.writeFieldEnd(); +- } +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class ColumnDefTupleSchemeFactory implements SchemeFactory { +- public ColumnDefTupleScheme getScheme() { +- return new ColumnDefTupleScheme(); +- } +- } +- +- private static class ColumnDefTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, ColumnDef struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeBinary(struct.name); +- oprot.writeString(struct.validation_class); +- BitSet optionals = new BitSet(); +- if (struct.isSetIndex_type()) { +- optionals.set(0); +- } +- if (struct.isSetIndex_name()) { +- optionals.set(1); +- } +- if (struct.isSetIndex_options()) { +- optionals.set(2); +- } +- oprot.writeBitSet(optionals, 3); +- if (struct.isSetIndex_type()) { +- oprot.writeI32(struct.index_type.getValue()); +- } +- if (struct.isSetIndex_name()) { +- oprot.writeString(struct.index_name); +- } +- if (struct.isSetIndex_options()) { 
+- { +- oprot.writeI32(struct.index_options.size()); +- for (Map.Entry _iter95 : struct.index_options.entrySet()) +- { +- oprot.writeString(_iter95.getKey()); +- oprot.writeString(_iter95.getValue()); +- } +- } +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, ColumnDef struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.name = iprot.readBinary(); +- struct.setNameIsSet(true); +- struct.validation_class = iprot.readString(); +- struct.setValidation_classIsSet(true); +- BitSet incoming = iprot.readBitSet(3); +- if (incoming.get(0)) { +- struct.index_type = IndexType.findByValue(iprot.readI32()); +- struct.setIndex_typeIsSet(true); +- } +- if (incoming.get(1)) { +- struct.index_name = iprot.readString(); +- struct.setIndex_nameIsSet(true); +- } +- if (incoming.get(2)) { +- { +- org.apache.thrift.protocol.TMap _map96 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); +- struct.index_options = new HashMap(2*_map96.size); +- for (int _i97 = 0; _i97 < _map96.size; ++_i97) +- { +- String _key98; +- String _val99; +- _key98 = iprot.readString(); +- _val99 = iprot.readString(); +- struct.index_options.put(_key98, _val99); +- } +- } +- struct.setIndex_optionsIsSet(true); +- } +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/ColumnOrSuperColumn.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/ColumnOrSuperColumn.java +deleted file mode 100644 +index 2de9d0e..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/ColumnOrSuperColumn.java ++++ /dev/null +@@ -1,771 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or 
more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. +- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * Methods for fetching rows/records from Cassandra will return either a single instance of ColumnOrSuperColumn or a list +- * of ColumnOrSuperColumns (get_slice()). 
If you're looking up a SuperColumn (or list of SuperColumns) then the resulting +- * instances of ColumnOrSuperColumn will have the requested SuperColumn in the attribute super_column. For queries resulting +- * in Columns, those values will be in the attribute column. This change was made between 0.3 and 0.4 to standardize on +- * single query methods that may return either a SuperColumn or Column. +- * +- * If the query was on a counter column family, you will either get a counter_column (instead of a column) or a +- * counter_super_column (instead of a super_column) +- * +- * @param column. The Column returned by get() or get_slice(). +- * @param super_column. The SuperColumn returned by get() or get_slice(). +- * @param counter_column. The Counterolumn returned by get() or get_slice(). +- * @param counter_super_column. The CounterSuperColumn returned by get() or get_slice(). +- */ +-public class ColumnOrSuperColumn implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ColumnOrSuperColumn"); +- +- private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField SUPER_COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("super_column", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField COUNTER_COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("counter_column", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- private static final org.apache.thrift.protocol.TField COUNTER_SUPER_COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("counter_super_column", org.apache.thrift.protocol.TType.STRUCT, (short)4); +- +- private static final Map, SchemeFactory> schemes = new HashMap, 
SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new ColumnOrSuperColumnStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new ColumnOrSuperColumnTupleSchemeFactory()); +- } +- +- public Column column; // optional +- public SuperColumn super_column; // optional +- public CounterColumn counter_column; // optional +- public CounterSuperColumn counter_super_column; // optional +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- COLUMN((short)1, "column"), +- SUPER_COLUMN((short)2, "super_column"), +- COUNTER_COLUMN((short)3, "counter_column"), +- COUNTER_SUPER_COLUMN((short)4, "counter_super_column"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // COLUMN +- return COLUMN; +- case 2: // SUPER_COLUMN +- return SUPER_COLUMN; +- case 3: // COUNTER_COLUMN +- return COUNTER_COLUMN; +- case 4: // COUNTER_SUPER_COLUMN +- return COUNTER_SUPER_COLUMN; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private _Fields optionals[] = {_Fields.COLUMN,_Fields.SUPER_COLUMN,_Fields.COUNTER_COLUMN,_Fields.COUNTER_SUPER_COLUMN}; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.COLUMN, new org.apache.thrift.meta_data.FieldMetaData("column", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Column.class))); +- tmpMap.put(_Fields.SUPER_COLUMN, new org.apache.thrift.meta_data.FieldMetaData("super_column", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SuperColumn.class))); +- tmpMap.put(_Fields.COUNTER_COLUMN, new org.apache.thrift.meta_data.FieldMetaData("counter_column", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CounterColumn.class))); +- tmpMap.put(_Fields.COUNTER_SUPER_COLUMN, new org.apache.thrift.meta_data.FieldMetaData("counter_super_column", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CounterSuperColumn.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ColumnOrSuperColumn.class, metaDataMap); +- } +- +- public ColumnOrSuperColumn() { +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public ColumnOrSuperColumn(ColumnOrSuperColumn other) { +- if (other.isSetColumn()) { +- this.column = new Column(other.column); +- } +- if (other.isSetSuper_column()) { +- this.super_column = new SuperColumn(other.super_column); +- } +- if (other.isSetCounter_column()) { +- this.counter_column = new CounterColumn(other.counter_column); +- } +- if (other.isSetCounter_super_column()) { +- this.counter_super_column = new CounterSuperColumn(other.counter_super_column); +- } +- } +- +- public ColumnOrSuperColumn deepCopy() { +- return new ColumnOrSuperColumn(this); +- } +- +- @Override +- public void clear() { +- this.column = null; +- this.super_column = null; +- this.counter_column = null; +- this.counter_super_column = null; +- } +- +- public Column getColumn() { +- return this.column; +- } +- +- public ColumnOrSuperColumn setColumn(Column column) { +- this.column = column; +- return this; +- } +- +- public void unsetColumn() { +- this.column = null; +- } +- +- /** Returns true if field column is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumn() { +- return this.column != null; +- } +- +- public void setColumnIsSet(boolean value) { +- if (!value) { +- this.column = null; +- } +- } +- +- public SuperColumn getSuper_column() { +- return this.super_column; +- } +- +- public ColumnOrSuperColumn setSuper_column(SuperColumn super_column) { +- this.super_column = super_column; +- return this; +- } +- +- public void unsetSuper_column() { +- this.super_column = null; +- } +- +- /** Returns true if field super_column is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuper_column() { +- return this.super_column != null; +- } +- +- public void setSuper_columnIsSet(boolean value) { +- if (!value) { +- 
this.super_column = null; +- } +- } +- +- public CounterColumn getCounter_column() { +- return this.counter_column; +- } +- +- public ColumnOrSuperColumn setCounter_column(CounterColumn counter_column) { +- this.counter_column = counter_column; +- return this; +- } +- +- public void unsetCounter_column() { +- this.counter_column = null; +- } +- +- /** Returns true if field counter_column is set (has been assigned a value) and false otherwise */ +- public boolean isSetCounter_column() { +- return this.counter_column != null; +- } +- +- public void setCounter_columnIsSet(boolean value) { +- if (!value) { +- this.counter_column = null; +- } +- } +- +- public CounterSuperColumn getCounter_super_column() { +- return this.counter_super_column; +- } +- +- public ColumnOrSuperColumn setCounter_super_column(CounterSuperColumn counter_super_column) { +- this.counter_super_column = counter_super_column; +- return this; +- } +- +- public void unsetCounter_super_column() { +- this.counter_super_column = null; +- } +- +- /** Returns true if field counter_super_column is set (has been assigned a value) and false otherwise */ +- public boolean isSetCounter_super_column() { +- return this.counter_super_column != null; +- } +- +- public void setCounter_super_columnIsSet(boolean value) { +- if (!value) { +- this.counter_super_column = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case COLUMN: +- if (value == null) { +- unsetColumn(); +- } else { +- setColumn((Column)value); +- } +- break; +- +- case SUPER_COLUMN: +- if (value == null) { +- unsetSuper_column(); +- } else { +- setSuper_column((SuperColumn)value); +- } +- break; +- +- case COUNTER_COLUMN: +- if (value == null) { +- unsetCounter_column(); +- } else { +- setCounter_column((CounterColumn)value); +- } +- break; +- +- case COUNTER_SUPER_COLUMN: +- if (value == null) { +- unsetCounter_super_column(); +- } else { +- setCounter_super_column((CounterSuperColumn)value); +- } 
+- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case COLUMN: +- return getColumn(); +- +- case SUPER_COLUMN: +- return getSuper_column(); +- +- case COUNTER_COLUMN: +- return getCounter_column(); +- +- case COUNTER_SUPER_COLUMN: +- return getCounter_super_column(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case COLUMN: +- return isSetColumn(); +- case SUPER_COLUMN: +- return isSetSuper_column(); +- case COUNTER_COLUMN: +- return isSetCounter_column(); +- case COUNTER_SUPER_COLUMN: +- return isSetCounter_super_column(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof ColumnOrSuperColumn) +- return this.equals((ColumnOrSuperColumn)that); +- return false; +- } +- +- public boolean equals(ColumnOrSuperColumn that) { +- if (that == null) +- return false; +- +- boolean this_present_column = true && this.isSetColumn(); +- boolean that_present_column = true && that.isSetColumn(); +- if (this_present_column || that_present_column) { +- if (!(this_present_column && that_present_column)) +- return false; +- if (!this.column.equals(that.column)) +- return false; +- } +- +- boolean this_present_super_column = true && this.isSetSuper_column(); +- boolean that_present_super_column = true && that.isSetSuper_column(); +- if (this_present_super_column || that_present_super_column) { +- if (!(this_present_super_column && that_present_super_column)) +- return false; +- if (!this.super_column.equals(that.super_column)) +- return false; +- } +- +- boolean this_present_counter_column = true && this.isSetCounter_column(); +- boolean that_present_counter_column = true && 
that.isSetCounter_column(); +- if (this_present_counter_column || that_present_counter_column) { +- if (!(this_present_counter_column && that_present_counter_column)) +- return false; +- if (!this.counter_column.equals(that.counter_column)) +- return false; +- } +- +- boolean this_present_counter_super_column = true && this.isSetCounter_super_column(); +- boolean that_present_counter_super_column = true && that.isSetCounter_super_column(); +- if (this_present_counter_super_column || that_present_counter_super_column) { +- if (!(this_present_counter_super_column && that_present_counter_super_column)) +- return false; +- if (!this.counter_super_column.equals(that.counter_super_column)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_column = true && (isSetColumn()); +- builder.append(present_column); +- if (present_column) +- builder.append(column); +- +- boolean present_super_column = true && (isSetSuper_column()); +- builder.append(present_super_column); +- if (present_super_column) +- builder.append(super_column); +- +- boolean present_counter_column = true && (isSetCounter_column()); +- builder.append(present_counter_column); +- if (present_counter_column) +- builder.append(counter_column); +- +- boolean present_counter_super_column = true && (isSetCounter_super_column()); +- builder.append(present_counter_super_column); +- if (present_counter_super_column) +- builder.append(counter_super_column); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(ColumnOrSuperColumn other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetColumn()).compareTo(other.isSetColumn()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumn()) { +- lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.column, other.column); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetSuper_column()).compareTo(other.isSetSuper_column()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuper_column()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.super_column, other.super_column); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetCounter_column()).compareTo(other.isSetCounter_column()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetCounter_column()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.counter_column, other.counter_column); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetCounter_super_column()).compareTo(other.isSetCounter_super_column()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetCounter_super_column()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.counter_super_column, other.counter_super_column); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("ColumnOrSuperColumn("); +- boolean first = true; +- +- if (isSetColumn()) { +- sb.append("column:"); +- if (this.column == null) { +- sb.append("null"); +- } else { +- sb.append(this.column); +- } +- first = 
false; +- } +- if (isSetSuper_column()) { +- if (!first) sb.append(", "); +- sb.append("super_column:"); +- if (this.super_column == null) { +- sb.append("null"); +- } else { +- sb.append(this.super_column); +- } +- first = false; +- } +- if (isSetCounter_column()) { +- if (!first) sb.append(", "); +- sb.append("counter_column:"); +- if (this.counter_column == null) { +- sb.append("null"); +- } else { +- sb.append(this.counter_column); +- } +- first = false; +- } +- if (isSetCounter_super_column()) { +- if (!first) sb.append(", "); +- sb.append("counter_super_column:"); +- if (this.counter_super_column == null) { +- sb.append("null"); +- } else { +- sb.append(this.counter_super_column); +- } +- first = false; +- } +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- if (column != null) { +- column.validate(); +- } +- if (super_column != null) { +- super_column.validate(); +- } +- if (counter_column != null) { +- counter_column.validate(); +- } +- if (counter_super_column != null) { +- counter_super_column.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class ColumnOrSuperColumnStandardSchemeFactory implements SchemeFactory { +- public ColumnOrSuperColumnStandardScheme getScheme() { +- return new 
ColumnOrSuperColumnStandardScheme(); +- } +- } +- +- private static class ColumnOrSuperColumnStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnOrSuperColumn struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // COLUMN +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.column = new Column(); +- struct.column.read(iprot); +- struct.setColumnIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // SUPER_COLUMN +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.super_column = new SuperColumn(); +- struct.super_column.read(iprot); +- struct.setSuper_columnIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // COUNTER_COLUMN +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.counter_column = new CounterColumn(); +- struct.counter_column.read(iprot); +- struct.setCounter_columnIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // COUNTER_SUPER_COLUMN +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.counter_super_column = new CounterSuperColumn(); +- struct.counter_super_column.read(iprot); +- struct.setCounter_super_columnIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of 
primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, ColumnOrSuperColumn struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.column != null) { +- if (struct.isSetColumn()) { +- oprot.writeFieldBegin(COLUMN_FIELD_DESC); +- struct.column.write(oprot); +- oprot.writeFieldEnd(); +- } +- } +- if (struct.super_column != null) { +- if (struct.isSetSuper_column()) { +- oprot.writeFieldBegin(SUPER_COLUMN_FIELD_DESC); +- struct.super_column.write(oprot); +- oprot.writeFieldEnd(); +- } +- } +- if (struct.counter_column != null) { +- if (struct.isSetCounter_column()) { +- oprot.writeFieldBegin(COUNTER_COLUMN_FIELD_DESC); +- struct.counter_column.write(oprot); +- oprot.writeFieldEnd(); +- } +- } +- if (struct.counter_super_column != null) { +- if (struct.isSetCounter_super_column()) { +- oprot.writeFieldBegin(COUNTER_SUPER_COLUMN_FIELD_DESC); +- struct.counter_super_column.write(oprot); +- oprot.writeFieldEnd(); +- } +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class ColumnOrSuperColumnTupleSchemeFactory implements SchemeFactory { +- public ColumnOrSuperColumnTupleScheme getScheme() { +- return new ColumnOrSuperColumnTupleScheme(); +- } +- } +- +- private static class ColumnOrSuperColumnTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, ColumnOrSuperColumn struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetColumn()) { +- optionals.set(0); +- } +- if (struct.isSetSuper_column()) { +- optionals.set(1); +- } +- if (struct.isSetCounter_column()) { +- optionals.set(2); +- } +- if (struct.isSetCounter_super_column()) { +- optionals.set(3); +- } +- oprot.writeBitSet(optionals, 4); +- if 
(struct.isSetColumn()) { +- struct.column.write(oprot); +- } +- if (struct.isSetSuper_column()) { +- struct.super_column.write(oprot); +- } +- if (struct.isSetCounter_column()) { +- struct.counter_column.write(oprot); +- } +- if (struct.isSetCounter_super_column()) { +- struct.counter_super_column.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, ColumnOrSuperColumn struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(4); +- if (incoming.get(0)) { +- struct.column = new Column(); +- struct.column.read(iprot); +- struct.setColumnIsSet(true); +- } +- if (incoming.get(1)) { +- struct.super_column = new SuperColumn(); +- struct.super_column.read(iprot); +- struct.setSuper_columnIsSet(true); +- } +- if (incoming.get(2)) { +- struct.counter_column = new CounterColumn(); +- struct.counter_column.read(iprot); +- struct.setCounter_columnIsSet(true); +- } +- if (incoming.get(3)) { +- struct.counter_super_column = new CounterSuperColumn(); +- struct.counter_super_column.read(iprot); +- struct.setCounter_super_columnIsSet(true); +- } +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/ColumnParent.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/ColumnParent.java +deleted file mode 100644 +index 73aff66..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/ColumnParent.java ++++ /dev/null +@@ -1,538 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. 
The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. +- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * ColumnParent is used when selecting groups of columns from the same ColumnFamily. In directory structure terms, imagine +- * ColumnParent as ColumnPath + '/../'. 
+- * +- * See also ColumnPath +- */ +-public class ColumnParent implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ColumnParent"); +- +- private static final org.apache.thrift.protocol.TField COLUMN_FAMILY_FIELD_DESC = new org.apache.thrift.protocol.TField("column_family", org.apache.thrift.protocol.TType.STRING, (short)3); +- private static final org.apache.thrift.protocol.TField SUPER_COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("super_column", org.apache.thrift.protocol.TType.STRING, (short)4); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new ColumnParentStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new ColumnParentTupleSchemeFactory()); +- } +- +- public String column_family; // required +- public ByteBuffer super_column; // optional +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- COLUMN_FAMILY((short)3, "column_family"), +- SUPER_COLUMN((short)4, "super_column"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 3: // COLUMN_FAMILY +- return COLUMN_FAMILY; +- case 4: // SUPER_COLUMN +- return SUPER_COLUMN; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. 
+- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private _Fields optionals[] = {_Fields.SUPER_COLUMN}; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.COLUMN_FAMILY, new org.apache.thrift.meta_data.FieldMetaData("column_family", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.SUPER_COLUMN, new org.apache.thrift.meta_data.FieldMetaData("super_column", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ColumnParent.class, metaDataMap); +- } +- +- public ColumnParent() { +- } +- +- public ColumnParent( +- String column_family) +- { +- this(); +- this.column_family = column_family; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public ColumnParent(ColumnParent other) { +- if (other.isSetColumn_family()) { +- this.column_family = other.column_family; +- } +- if (other.isSetSuper_column()) { +- this.super_column = org.apache.thrift.TBaseHelper.copyBinary(other.super_column); +-; +- } +- } +- +- public ColumnParent deepCopy() { +- return new ColumnParent(this); +- } +- +- @Override +- public void clear() { +- this.column_family = null; +- this.super_column = null; +- } +- +- public String getColumn_family() { +- return this.column_family; +- } +- +- public ColumnParent setColumn_family(String column_family) { +- this.column_family = column_family; +- return this; +- } +- +- public void unsetColumn_family() { +- this.column_family = null; +- } +- +- /** Returns true if field column_family is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumn_family() { +- return this.column_family != null; +- } +- +- public void setColumn_familyIsSet(boolean value) { +- if (!value) { +- this.column_family = null; +- } +- } +- +- public byte[] getSuper_column() { +- setSuper_column(org.apache.thrift.TBaseHelper.rightSize(super_column)); +- return super_column == null ? null : super_column.array(); +- } +- +- public ByteBuffer bufferForSuper_column() { +- return super_column; +- } +- +- public ColumnParent setSuper_column(byte[] super_column) { +- setSuper_column(super_column == null ? 
(ByteBuffer)null : ByteBuffer.wrap(super_column)); +- return this; +- } +- +- public ColumnParent setSuper_column(ByteBuffer super_column) { +- this.super_column = super_column; +- return this; +- } +- +- public void unsetSuper_column() { +- this.super_column = null; +- } +- +- /** Returns true if field super_column is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuper_column() { +- return this.super_column != null; +- } +- +- public void setSuper_columnIsSet(boolean value) { +- if (!value) { +- this.super_column = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case COLUMN_FAMILY: +- if (value == null) { +- unsetColumn_family(); +- } else { +- setColumn_family((String)value); +- } +- break; +- +- case SUPER_COLUMN: +- if (value == null) { +- unsetSuper_column(); +- } else { +- setSuper_column((ByteBuffer)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case COLUMN_FAMILY: +- return getColumn_family(); +- +- case SUPER_COLUMN: +- return getSuper_column(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case COLUMN_FAMILY: +- return isSetColumn_family(); +- case SUPER_COLUMN: +- return isSetSuper_column(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof ColumnParent) +- return this.equals((ColumnParent)that); +- return false; +- } +- +- public boolean equals(ColumnParent that) { +- if (that == null) +- return false; +- +- boolean this_present_column_family = true && this.isSetColumn_family(); +- boolean that_present_column_family = true && 
that.isSetColumn_family(); +- if (this_present_column_family || that_present_column_family) { +- if (!(this_present_column_family && that_present_column_family)) +- return false; +- if (!this.column_family.equals(that.column_family)) +- return false; +- } +- +- boolean this_present_super_column = true && this.isSetSuper_column(); +- boolean that_present_super_column = true && that.isSetSuper_column(); +- if (this_present_super_column || that_present_super_column) { +- if (!(this_present_super_column && that_present_super_column)) +- return false; +- if (!this.super_column.equals(that.super_column)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_column_family = true && (isSetColumn_family()); +- builder.append(present_column_family); +- if (present_column_family) +- builder.append(column_family); +- +- boolean present_super_column = true && (isSetSuper_column()); +- builder.append(present_super_column); +- if (present_super_column) +- builder.append(super_column); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(ColumnParent other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetColumn_family()).compareTo(other.isSetColumn_family()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumn_family()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_family, other.column_family); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetSuper_column()).compareTo(other.isSetSuper_column()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuper_column()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.super_column, other.super_column); +- if 
(lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("ColumnParent("); +- boolean first = true; +- +- sb.append("column_family:"); +- if (this.column_family == null) { +- sb.append("null"); +- } else { +- sb.append(this.column_family); +- } +- first = false; +- if (isSetSuper_column()) { +- if (!first) sb.append(", "); +- sb.append("super_column:"); +- if (this.super_column == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.super_column, sb); +- } +- first = false; +- } +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (column_family == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'column_family' was not present! 
Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class ColumnParentStandardSchemeFactory implements SchemeFactory { +- public ColumnParentStandardScheme getScheme() { +- return new ColumnParentStandardScheme(); +- } +- } +- +- private static class ColumnParentStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnParent struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 3: // COLUMN_FAMILY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.column_family = iprot.readString(); +- struct.setColumn_familyIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // SUPER_COLUMN +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.super_column = iprot.readBinary(); +- struct.setSuper_columnIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, ColumnParent struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.column_family != null) { +- oprot.writeFieldBegin(COLUMN_FAMILY_FIELD_DESC); +- oprot.writeString(struct.column_family); +- oprot.writeFieldEnd(); +- } +- if (struct.super_column != null) { +- if (struct.isSetSuper_column()) { +- oprot.writeFieldBegin(SUPER_COLUMN_FIELD_DESC); +- oprot.writeBinary(struct.super_column); +- oprot.writeFieldEnd(); +- } +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class ColumnParentTupleSchemeFactory implements SchemeFactory { +- public ColumnParentTupleScheme getScheme() { +- return new ColumnParentTupleScheme(); +- } +- } +- +- private static class ColumnParentTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, ColumnParent struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeString(struct.column_family); +- BitSet optionals = new BitSet(); +- if (struct.isSetSuper_column()) { +- optionals.set(0); +- } +- oprot.writeBitSet(optionals, 1); +- if (struct.isSetSuper_column()) { +- oprot.writeBinary(struct.super_column); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, ColumnParent struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.column_family = iprot.readString(); +- struct.setColumn_familyIsSet(true); +- BitSet incoming = iprot.readBitSet(1); +- if (incoming.get(0)) { +- struct.super_column = iprot.readBinary(); +- struct.setSuper_columnIsSet(true); +- } +- } +- } +- +-} +- 
+diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/ColumnPath.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/ColumnPath.java +deleted file mode 100644 +index 627b9a0..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/ColumnPath.java ++++ /dev/null +@@ -1,660 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. 
+- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * The ColumnPath is the path to a single column in Cassandra. It might make sense to think of ColumnPath and +- * ColumnParent in terms of a directory structure. +- * +- * ColumnPath is used to looking up a single column. +- * +- * @param column_family. The name of the CF of the column being looked up. +- * @param super_column. The super column name. +- * @param column. The column name. 
+- */ +-public class ColumnPath implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ColumnPath"); +- +- private static final org.apache.thrift.protocol.TField COLUMN_FAMILY_FIELD_DESC = new org.apache.thrift.protocol.TField("column_family", org.apache.thrift.protocol.TType.STRING, (short)3); +- private static final org.apache.thrift.protocol.TField SUPER_COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("super_column", org.apache.thrift.protocol.TType.STRING, (short)4); +- private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRING, (short)5); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new ColumnPathStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new ColumnPathTupleSchemeFactory()); +- } +- +- public String column_family; // required +- public ByteBuffer super_column; // optional +- public ByteBuffer column; // optional +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- COLUMN_FAMILY((short)3, "column_family"), +- SUPER_COLUMN((short)4, "super_column"), +- COLUMN((short)5, "column"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. 
+- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 3: // COLUMN_FAMILY +- return COLUMN_FAMILY; +- case 4: // SUPER_COLUMN +- return SUPER_COLUMN; +- case 5: // COLUMN +- return COLUMN; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private _Fields optionals[] = {_Fields.SUPER_COLUMN,_Fields.COLUMN}; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.COLUMN_FAMILY, new org.apache.thrift.meta_data.FieldMetaData("column_family", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.SUPER_COLUMN, new org.apache.thrift.meta_data.FieldMetaData("super_column", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.COLUMN, new 
org.apache.thrift.meta_data.FieldMetaData("column", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ColumnPath.class, metaDataMap); +- } +- +- public ColumnPath() { +- } +- +- public ColumnPath( +- String column_family) +- { +- this(); +- this.column_family = column_family; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public ColumnPath(ColumnPath other) { +- if (other.isSetColumn_family()) { +- this.column_family = other.column_family; +- } +- if (other.isSetSuper_column()) { +- this.super_column = org.apache.thrift.TBaseHelper.copyBinary(other.super_column); +-; +- } +- if (other.isSetColumn()) { +- this.column = org.apache.thrift.TBaseHelper.copyBinary(other.column); +-; +- } +- } +- +- public ColumnPath deepCopy() { +- return new ColumnPath(this); +- } +- +- @Override +- public void clear() { +- this.column_family = null; +- this.super_column = null; +- this.column = null; +- } +- +- public String getColumn_family() { +- return this.column_family; +- } +- +- public ColumnPath setColumn_family(String column_family) { +- this.column_family = column_family; +- return this; +- } +- +- public void unsetColumn_family() { +- this.column_family = null; +- } +- +- /** Returns true if field column_family is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumn_family() { +- return this.column_family != null; +- } +- +- public void setColumn_familyIsSet(boolean value) { +- if (!value) { +- this.column_family = null; +- } +- } +- +- public byte[] getSuper_column() { +- setSuper_column(org.apache.thrift.TBaseHelper.rightSize(super_column)); +- return super_column == null ? 
null : super_column.array(); +- } +- +- public ByteBuffer bufferForSuper_column() { +- return super_column; +- } +- +- public ColumnPath setSuper_column(byte[] super_column) { +- setSuper_column(super_column == null ? (ByteBuffer)null : ByteBuffer.wrap(super_column)); +- return this; +- } +- +- public ColumnPath setSuper_column(ByteBuffer super_column) { +- this.super_column = super_column; +- return this; +- } +- +- public void unsetSuper_column() { +- this.super_column = null; +- } +- +- /** Returns true if field super_column is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuper_column() { +- return this.super_column != null; +- } +- +- public void setSuper_columnIsSet(boolean value) { +- if (!value) { +- this.super_column = null; +- } +- } +- +- public byte[] getColumn() { +- setColumn(org.apache.thrift.TBaseHelper.rightSize(column)); +- return column == null ? null : column.array(); +- } +- +- public ByteBuffer bufferForColumn() { +- return column; +- } +- +- public ColumnPath setColumn(byte[] column) { +- setColumn(column == null ? 
(ByteBuffer)null : ByteBuffer.wrap(column)); +- return this; +- } +- +- public ColumnPath setColumn(ByteBuffer column) { +- this.column = column; +- return this; +- } +- +- public void unsetColumn() { +- this.column = null; +- } +- +- /** Returns true if field column is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumn() { +- return this.column != null; +- } +- +- public void setColumnIsSet(boolean value) { +- if (!value) { +- this.column = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case COLUMN_FAMILY: +- if (value == null) { +- unsetColumn_family(); +- } else { +- setColumn_family((String)value); +- } +- break; +- +- case SUPER_COLUMN: +- if (value == null) { +- unsetSuper_column(); +- } else { +- setSuper_column((ByteBuffer)value); +- } +- break; +- +- case COLUMN: +- if (value == null) { +- unsetColumn(); +- } else { +- setColumn((ByteBuffer)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case COLUMN_FAMILY: +- return getColumn_family(); +- +- case SUPER_COLUMN: +- return getSuper_column(); +- +- case COLUMN: +- return getColumn(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case COLUMN_FAMILY: +- return isSetColumn_family(); +- case SUPER_COLUMN: +- return isSetSuper_column(); +- case COLUMN: +- return isSetColumn(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof ColumnPath) +- return this.equals((ColumnPath)that); +- return false; +- } +- +- public boolean equals(ColumnPath that) { +- if (that == null) +- return false; +- +- boolean 
this_present_column_family = true && this.isSetColumn_family(); +- boolean that_present_column_family = true && that.isSetColumn_family(); +- if (this_present_column_family || that_present_column_family) { +- if (!(this_present_column_family && that_present_column_family)) +- return false; +- if (!this.column_family.equals(that.column_family)) +- return false; +- } +- +- boolean this_present_super_column = true && this.isSetSuper_column(); +- boolean that_present_super_column = true && that.isSetSuper_column(); +- if (this_present_super_column || that_present_super_column) { +- if (!(this_present_super_column && that_present_super_column)) +- return false; +- if (!this.super_column.equals(that.super_column)) +- return false; +- } +- +- boolean this_present_column = true && this.isSetColumn(); +- boolean that_present_column = true && that.isSetColumn(); +- if (this_present_column || that_present_column) { +- if (!(this_present_column && that_present_column)) +- return false; +- if (!this.column.equals(that.column)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_column_family = true && (isSetColumn_family()); +- builder.append(present_column_family); +- if (present_column_family) +- builder.append(column_family); +- +- boolean present_super_column = true && (isSetSuper_column()); +- builder.append(present_super_column); +- if (present_super_column) +- builder.append(super_column); +- +- boolean present_column = true && (isSetColumn()); +- builder.append(present_column); +- if (present_column) +- builder.append(column); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(ColumnPath other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = 
Boolean.valueOf(isSetColumn_family()).compareTo(other.isSetColumn_family()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumn_family()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_family, other.column_family); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetSuper_column()).compareTo(other.isSetSuper_column()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuper_column()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.super_column, other.super_column); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetColumn()).compareTo(other.isSetColumn()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumn()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column, other.column); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("ColumnPath("); +- boolean first = true; +- +- sb.append("column_family:"); +- if (this.column_family == null) { +- sb.append("null"); +- } else { +- sb.append(this.column_family); +- } +- first = false; +- if (isSetSuper_column()) { +- if (!first) sb.append(", "); +- sb.append("super_column:"); +- if (this.super_column == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.super_column, sb); +- } 
+- first = false; +- } +- if (isSetColumn()) { +- if (!first) sb.append(", "); +- sb.append("column:"); +- if (this.column == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.column, sb); +- } +- first = false; +- } +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (column_family == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'column_family' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class ColumnPathStandardSchemeFactory implements SchemeFactory { +- public ColumnPathStandardScheme getScheme() { +- return new ColumnPathStandardScheme(); +- } +- } +- +- private static class ColumnPathStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnPath struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 3: // COLUMN_FAMILY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- 
struct.column_family = iprot.readString(); +- struct.setColumn_familyIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // SUPER_COLUMN +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.super_column = iprot.readBinary(); +- struct.setSuper_columnIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 5: // COLUMN +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.column = iprot.readBinary(); +- struct.setColumnIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, ColumnPath struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.column_family != null) { +- oprot.writeFieldBegin(COLUMN_FAMILY_FIELD_DESC); +- oprot.writeString(struct.column_family); +- oprot.writeFieldEnd(); +- } +- if (struct.super_column != null) { +- if (struct.isSetSuper_column()) { +- oprot.writeFieldBegin(SUPER_COLUMN_FIELD_DESC); +- oprot.writeBinary(struct.super_column); +- oprot.writeFieldEnd(); +- } +- } +- if (struct.column != null) { +- if (struct.isSetColumn()) { +- oprot.writeFieldBegin(COLUMN_FIELD_DESC); +- oprot.writeBinary(struct.column); +- oprot.writeFieldEnd(); +- } +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class ColumnPathTupleSchemeFactory implements SchemeFactory { +- public ColumnPathTupleScheme getScheme() { +- return new ColumnPathTupleScheme(); +- } +- } +- +- 
private static class ColumnPathTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, ColumnPath struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeString(struct.column_family); +- BitSet optionals = new BitSet(); +- if (struct.isSetSuper_column()) { +- optionals.set(0); +- } +- if (struct.isSetColumn()) { +- optionals.set(1); +- } +- oprot.writeBitSet(optionals, 2); +- if (struct.isSetSuper_column()) { +- oprot.writeBinary(struct.super_column); +- } +- if (struct.isSetColumn()) { +- oprot.writeBinary(struct.column); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, ColumnPath struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.column_family = iprot.readString(); +- struct.setColumn_familyIsSet(true); +- BitSet incoming = iprot.readBitSet(2); +- if (incoming.get(0)) { +- struct.super_column = iprot.readBinary(); +- struct.setSuper_columnIsSet(true); +- } +- if (incoming.get(1)) { +- struct.column = iprot.readBinary(); +- struct.setColumnIsSet(true); +- } +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/ColumnSlice.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/ColumnSlice.java +deleted file mode 100644 +index 67b88a3..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/ColumnSlice.java ++++ /dev/null +@@ -1,551 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. 
The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. +- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * The ColumnSlice is used to select a set of columns from inside a row. +- * If start or finish are unspecified they will default to the start-of +- * end-of value. +- * @param start. The start of the ColumnSlice inclusive +- * @param finish. 
The end of the ColumnSlice inclusive +- */ +-public class ColumnSlice implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ColumnSlice"); +- +- private static final org.apache.thrift.protocol.TField START_FIELD_DESC = new org.apache.thrift.protocol.TField("start", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField FINISH_FIELD_DESC = new org.apache.thrift.protocol.TField("finish", org.apache.thrift.protocol.TType.STRING, (short)2); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new ColumnSliceStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new ColumnSliceTupleSchemeFactory()); +- } +- +- public ByteBuffer start; // optional +- public ByteBuffer finish; // optional +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- START((short)1, "start"), +- FINISH((short)2, "finish"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // START +- return START; +- case 2: // FINISH +- return FINISH; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. 
+- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private _Fields optionals[] = {_Fields.START,_Fields.FINISH}; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.START, new org.apache.thrift.meta_data.FieldMetaData("start", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.FINISH, new org.apache.thrift.meta_data.FieldMetaData("finish", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ColumnSlice.class, metaDataMap); +- } +- +- public ColumnSlice() { +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public ColumnSlice(ColumnSlice other) { +- if (other.isSetStart()) { +- this.start = org.apache.thrift.TBaseHelper.copyBinary(other.start); +-; +- } +- if (other.isSetFinish()) { +- this.finish = org.apache.thrift.TBaseHelper.copyBinary(other.finish); +-; +- } +- } +- +- public ColumnSlice deepCopy() { +- return new ColumnSlice(this); +- } +- +- @Override +- public void clear() { +- this.start = null; +- this.finish = null; +- } +- +- public byte[] getStart() { +- setStart(org.apache.thrift.TBaseHelper.rightSize(start)); +- return start == null ? null : start.array(); +- } +- +- public ByteBuffer bufferForStart() { +- return start; +- } +- +- public ColumnSlice setStart(byte[] start) { +- setStart(start == null ? (ByteBuffer)null : ByteBuffer.wrap(start)); +- return this; +- } +- +- public ColumnSlice setStart(ByteBuffer start) { +- this.start = start; +- return this; +- } +- +- public void unsetStart() { +- this.start = null; +- } +- +- /** Returns true if field start is set (has been assigned a value) and false otherwise */ +- public boolean isSetStart() { +- return this.start != null; +- } +- +- public void setStartIsSet(boolean value) { +- if (!value) { +- this.start = null; +- } +- } +- +- public byte[] getFinish() { +- setFinish(org.apache.thrift.TBaseHelper.rightSize(finish)); +- return finish == null ? null : finish.array(); +- } +- +- public ByteBuffer bufferForFinish() { +- return finish; +- } +- +- public ColumnSlice setFinish(byte[] finish) { +- setFinish(finish == null ? 
(ByteBuffer)null : ByteBuffer.wrap(finish)); +- return this; +- } +- +- public ColumnSlice setFinish(ByteBuffer finish) { +- this.finish = finish; +- return this; +- } +- +- public void unsetFinish() { +- this.finish = null; +- } +- +- /** Returns true if field finish is set (has been assigned a value) and false otherwise */ +- public boolean isSetFinish() { +- return this.finish != null; +- } +- +- public void setFinishIsSet(boolean value) { +- if (!value) { +- this.finish = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case START: +- if (value == null) { +- unsetStart(); +- } else { +- setStart((ByteBuffer)value); +- } +- break; +- +- case FINISH: +- if (value == null) { +- unsetFinish(); +- } else { +- setFinish((ByteBuffer)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case START: +- return getStart(); +- +- case FINISH: +- return getFinish(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case START: +- return isSetStart(); +- case FINISH: +- return isSetFinish(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof ColumnSlice) +- return this.equals((ColumnSlice)that); +- return false; +- } +- +- public boolean equals(ColumnSlice that) { +- if (that == null) +- return false; +- +- boolean this_present_start = true && this.isSetStart(); +- boolean that_present_start = true && that.isSetStart(); +- if (this_present_start || that_present_start) { +- if (!(this_present_start && that_present_start)) +- return false; +- if (!this.start.equals(that.start)) +- return false; +- } +- +- 
boolean this_present_finish = true && this.isSetFinish(); +- boolean that_present_finish = true && that.isSetFinish(); +- if (this_present_finish || that_present_finish) { +- if (!(this_present_finish && that_present_finish)) +- return false; +- if (!this.finish.equals(that.finish)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_start = true && (isSetStart()); +- builder.append(present_start); +- if (present_start) +- builder.append(start); +- +- boolean present_finish = true && (isSetFinish()); +- builder.append(present_finish); +- if (present_finish) +- builder.append(finish); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(ColumnSlice other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetStart()).compareTo(other.isSetStart()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetStart()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.start, other.start); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetFinish()).compareTo(other.isSetFinish()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetFinish()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.finish, other.finish); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- 
schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("ColumnSlice("); +- boolean first = true; +- +- if (isSetStart()) { +- sb.append("start:"); +- if (this.start == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.start, sb); +- } +- first = false; +- } +- if (isSetFinish()) { +- if (!first) sb.append(", "); +- sb.append("finish:"); +- if (this.finish == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.finish, sb); +- } +- first = false; +- } +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class ColumnSliceStandardSchemeFactory implements SchemeFactory { +- public ColumnSliceStandardScheme getScheme() { +- return new ColumnSliceStandardScheme(); +- } +- } +- +- private static class ColumnSliceStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnSlice struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if 
(schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // START +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.start = iprot.readBinary(); +- struct.setStartIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // FINISH +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.finish = iprot.readBinary(); +- struct.setFinishIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, ColumnSlice struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.start != null) { +- if (struct.isSetStart()) { +- oprot.writeFieldBegin(START_FIELD_DESC); +- oprot.writeBinary(struct.start); +- oprot.writeFieldEnd(); +- } +- } +- if (struct.finish != null) { +- if (struct.isSetFinish()) { +- oprot.writeFieldBegin(FINISH_FIELD_DESC); +- oprot.writeBinary(struct.finish); +- oprot.writeFieldEnd(); +- } +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class ColumnSliceTupleSchemeFactory implements SchemeFactory { +- public ColumnSliceTupleScheme getScheme() { +- return new ColumnSliceTupleScheme(); +- } +- } +- +- private static class ColumnSliceTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, ColumnSlice struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); 
+- if (struct.isSetStart()) { +- optionals.set(0); +- } +- if (struct.isSetFinish()) { +- optionals.set(1); +- } +- oprot.writeBitSet(optionals, 2); +- if (struct.isSetStart()) { +- oprot.writeBinary(struct.start); +- } +- if (struct.isSetFinish()) { +- oprot.writeBinary(struct.finish); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, ColumnSlice struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(2); +- if (incoming.get(0)) { +- struct.start = iprot.readBinary(); +- struct.setStartIsSet(true); +- } +- if (incoming.get(1)) { +- struct.finish = iprot.readBinary(); +- struct.setFinishIsSet(true); +- } +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/Compression.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/Compression.java +deleted file mode 100644 +index acaf43f..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/Compression.java ++++ /dev/null +@@ -1,69 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. 
See the License for the +- * specific language governing permissions and limitations +- * under the License. +- * +- */ +- +- +- +-import java.util.Map; +-import java.util.HashMap; +-import org.apache.thrift.TEnum; +- +-/** +- * CQL query compression +- */ +-public enum Compression implements org.apache.thrift.TEnum { +- GZIP(1), +- NONE(2); +- +- private final int value; +- +- private Compression(int value) { +- this.value = value; +- } +- +- /** +- * Get the integer value of this enum value, as defined in the Thrift IDL. +- */ +- public int getValue() { +- return value; +- } +- +- /** +- * Find a the enum type by its integer value, as defined in the Thrift IDL. +- * @return null if the value is not found. +- */ +- public static Compression findByValue(int value) { +- switch (value) { +- case 1: +- return GZIP; +- case 2: +- return NONE; +- default: +- return null; +- } +- } +-} +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/ConsistencyLevel.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/ConsistencyLevel.java +deleted file mode 100644 +index ec5080a..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/ConsistencyLevel.java ++++ /dev/null +@@ -1,137 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. 
You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. +- * +- */ +- +- +- +-import java.util.Map; +-import java.util.HashMap; +-import org.apache.thrift.TEnum; +- +-/** +- * The ConsistencyLevel is an enum that controls both read and write +- * behavior based on the ReplicationFactor of the keyspace. The +- * different consistency levels have different meanings, depending on +- * if you're doing a write or read operation. +- * +- * If W + R > ReplicationFactor, where W is the number of nodes to +- * block for on write, and R the number to block for on reads, you +- * will have strongly consistent behavior; that is, readers will +- * always see the most recent write. Of these, the most interesting is +- * to do QUORUM reads and writes, which gives you consistency while +- * still allowing availability in the face of node failures up to half +- * of . Of course if latency is more important than +- * consistency then you can use lower values for either or both. +- * +- * Some ConsistencyLevels (ONE, TWO, THREE) refer to a specific number +- * of replicas rather than a logical concept that adjusts +- * automatically with the replication factor. Of these, only ONE is +- * commonly used; TWO and (even more rarely) THREE are only useful +- * when you care more about guaranteeing a certain level of +- * durability, than consistency. +- * +- * Write consistency levels make the following guarantees before reporting success to the client: +- * ANY Ensure that the write has been written once somewhere, including possibly being hinted in a non-target node. 
+- * ONE Ensure that the write has been written to at least 1 node's commit log and memory table +- * TWO Ensure that the write has been written to at least 2 node's commit log and memory table +- * THREE Ensure that the write has been written to at least 3 node's commit log and memory table +- * QUORUM Ensure that the write has been written to / 2 + 1 nodes +- * LOCAL_ONE Ensure that the write has been written to 1 node within the local datacenter (requires NetworkTopologyStrategy) +- * LOCAL_QUORUM Ensure that the write has been written to / 2 + 1 nodes, within the local datacenter (requires NetworkTopologyStrategy) +- * EACH_QUORUM Ensure that the write has been written to / 2 + 1 nodes in each datacenter (requires NetworkTopologyStrategy) +- * ALL Ensure that the write is written to <ReplicationFactor> nodes before responding to the client. +- * +- * Read consistency levels make the following guarantees before returning successful results to the client: +- * ANY Not supported. You probably want ONE instead. +- * ONE Returns the record obtained from a single replica. +- * TWO Returns the record with the most recent timestamp once two replicas have replied. +- * THREE Returns the record with the most recent timestamp once three replicas have replied. +- * QUORUM Returns the record with the most recent timestamp once a majority of replicas have replied. +- * LOCAL_ONE Returns the record with the most recent timestamp once a single replica within the local datacenter have replied. +- * LOCAL_QUORUM Returns the record with the most recent timestamp once a majority of replicas within the local datacenter have replied. +- * EACH_QUORUM Returns the record with the most recent timestamp once a majority of replicas within each datacenter have replied. +- * ALL Returns the record with the most recent timestamp once all replicas have replied (implies no replica may be down).. 
+- */ +-public enum ConsistencyLevel implements org.apache.thrift.TEnum { +- ONE(1), +- QUORUM(2), +- LOCAL_QUORUM(3), +- EACH_QUORUM(4), +- ALL(5), +- ANY(6), +- TWO(7), +- THREE(8), +- SERIAL(9), +- LOCAL_SERIAL(10), +- LOCAL_ONE(11); +- +- private final int value; +- +- private ConsistencyLevel(int value) { +- this.value = value; +- } +- +- /** +- * Get the integer value of this enum value, as defined in the Thrift IDL. +- */ +- public int getValue() { +- return value; +- } +- +- /** +- * Find a the enum type by its integer value, as defined in the Thrift IDL. +- * @return null if the value is not found. +- */ +- public static ConsistencyLevel findByValue(int value) { +- switch (value) { +- case 1: +- return ONE; +- case 2: +- return QUORUM; +- case 3: +- return LOCAL_QUORUM; +- case 4: +- return EACH_QUORUM; +- case 5: +- return ALL; +- case 6: +- return ANY; +- case 7: +- return TWO; +- case 8: +- return THREE; +- case 9: +- return SERIAL; +- case 10: +- return LOCAL_SERIAL; +- case 11: +- return LOCAL_ONE; +- default: +- return null; +- } +- } +-} +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/CounterColumn.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/CounterColumn.java +deleted file mode 100644 +index 6069218..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/CounterColumn.java ++++ /dev/null +@@ -1,521 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. 
You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. +- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-public class CounterColumn implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CounterColumn"); +- +- private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.I64, (short)2); +- +- 
private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new CounterColumnStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new CounterColumnTupleSchemeFactory()); +- } +- +- public ByteBuffer name; // required +- public long value; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- NAME((short)1, "name"), +- VALUE((short)2, "value"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // NAME +- return NAME; +- case 2: // VALUE +- return VALUE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private static final int __VALUE_ISSET_ID = 0; +- private byte __isset_bitfield = 0; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CounterColumn.class, metaDataMap); +- } +- +- public CounterColumn() { +- } +- +- public CounterColumn( +- ByteBuffer name, +- long value) +- { +- this(); +- this.name = name; +- this.value = value; +- setValueIsSet(true); +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public CounterColumn(CounterColumn other) { +- __isset_bitfield = other.__isset_bitfield; +- if (other.isSetName()) { +- this.name = org.apache.thrift.TBaseHelper.copyBinary(other.name); +-; +- } +- this.value = other.value; +- } +- +- public CounterColumn deepCopy() { +- return new CounterColumn(this); +- } +- +- @Override +- public void clear() { +- this.name = null; +- setValueIsSet(false); +- this.value = 0; +- } +- +- public byte[] getName() { +- setName(org.apache.thrift.TBaseHelper.rightSize(name)); +- return name == null ? null : name.array(); +- } +- +- public ByteBuffer bufferForName() { +- return name; +- } +- +- public CounterColumn setName(byte[] name) { +- setName(name == null ? (ByteBuffer)null : ByteBuffer.wrap(name)); +- return this; +- } +- +- public CounterColumn setName(ByteBuffer name) { +- this.name = name; +- return this; +- } +- +- public void unsetName() { +- this.name = null; +- } +- +- /** Returns true if field name is set (has been assigned a value) and false otherwise */ +- public boolean isSetName() { +- return this.name != null; +- } +- +- public void setNameIsSet(boolean value) { +- if (!value) { +- this.name = null; +- } +- } +- +- public long getValue() { +- return this.value; +- } +- +- public CounterColumn setValue(long value) { +- this.value = value; +- setValueIsSet(true); +- return this; +- } +- +- public void unsetValue() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __VALUE_ISSET_ID); +- } +- +- /** Returns true if field value is set (has been assigned a value) and false otherwise */ +- public boolean isSetValue() { +- return EncodingUtils.testBit(__isset_bitfield, __VALUE_ISSET_ID); +- } +- +- public void setValueIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __VALUE_ISSET_ID, value); +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case NAME: +- if (value == null) { +- unsetName(); +- } else { +- 
setName((ByteBuffer)value); +- } +- break; +- +- case VALUE: +- if (value == null) { +- unsetValue(); +- } else { +- setValue((Long)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case NAME: +- return getName(); +- +- case VALUE: +- return Long.valueOf(getValue()); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case NAME: +- return isSetName(); +- case VALUE: +- return isSetValue(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof CounterColumn) +- return this.equals((CounterColumn)that); +- return false; +- } +- +- public boolean equals(CounterColumn that) { +- if (that == null) +- return false; +- +- boolean this_present_name = true && this.isSetName(); +- boolean that_present_name = true && that.isSetName(); +- if (this_present_name || that_present_name) { +- if (!(this_present_name && that_present_name)) +- return false; +- if (!this.name.equals(that.name)) +- return false; +- } +- +- boolean this_present_value = true; +- boolean that_present_value = true; +- if (this_present_value || that_present_value) { +- if (!(this_present_value && that_present_value)) +- return false; +- if (this.value != that.value) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_name = true && (isSetName()); +- builder.append(present_name); +- if (present_name) +- builder.append(name); +- +- boolean present_value = true; +- builder.append(present_value); +- if (present_value) +- builder.append(value); +- +- return builder.toHashCode(); +- } +- +- 
@Override +- public int compareTo(CounterColumn other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetName()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetValue()).compareTo(other.isSetValue()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetValue()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.value, other.value); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("CounterColumn("); +- boolean first = true; +- +- sb.append("name:"); +- if (this.name == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.name, sb); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("value:"); +- sb.append(this.value); +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (name == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'name' was not present! 
Struct: " + toString()); +- } +- // alas, we cannot check 'value' because it's a primitive and you chose the non-beans generator. +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. +- __isset_bitfield = 0; +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class CounterColumnStandardSchemeFactory implements SchemeFactory { +- public CounterColumnStandardScheme getScheme() { +- return new CounterColumnStandardScheme(); +- } +- } +- +- private static class CounterColumnStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, CounterColumn struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // NAME +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.name = iprot.readBinary(); +- struct.setNameIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // VALUE +- if (schemeField.type == org.apache.thrift.protocol.TType.I64) { +- struct.value = 
iprot.readI64(); +- struct.setValueIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- if (!struct.isSetValue()) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'value' was not found in serialized data! Struct: " + toString()); +- } +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, CounterColumn struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.name != null) { +- oprot.writeFieldBegin(NAME_FIELD_DESC); +- oprot.writeBinary(struct.name); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldBegin(VALUE_FIELD_DESC); +- oprot.writeI64(struct.value); +- oprot.writeFieldEnd(); +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class CounterColumnTupleSchemeFactory implements SchemeFactory { +- public CounterColumnTupleScheme getScheme() { +- return new CounterColumnTupleScheme(); +- } +- } +- +- private static class CounterColumnTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, CounterColumn struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeBinary(struct.name); +- oprot.writeI64(struct.value); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, CounterColumn struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.name = iprot.readBinary(); +- struct.setNameIsSet(true); +- struct.value = iprot.readI64(); +- struct.setValueIsSet(true); +- } +- } +- +-} +- +diff --git 
a/interface/thrift/gen-java/org/apache/cassandra/thrift/CounterSuperColumn.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/CounterSuperColumn.java +deleted file mode 100644 +index 96234c7..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/CounterSuperColumn.java ++++ /dev/null +@@ -1,576 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. 
+- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-public class CounterSuperColumn implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CounterSuperColumn"); +- +- private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)2); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new CounterSuperColumnStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new CounterSuperColumnTupleSchemeFactory()); +- } +- +- public ByteBuffer name; // required +- public List columns; // required +- +- /** The set of fields this struct contains, along with convenience 
methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- NAME((short)1, "name"), +- COLUMNS((short)2, "columns"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // NAME +- return NAME; +- case 2: // COLUMNS +- return COLUMNS; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CounterColumn.class)))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CounterSuperColumn.class, metaDataMap); +- } +- +- public CounterSuperColumn() { +- } +- +- public CounterSuperColumn( +- ByteBuffer name, +- List columns) +- { +- this(); +- this.name = name; +- this.columns = columns; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public CounterSuperColumn(CounterSuperColumn other) { +- if (other.isSetName()) { +- this.name = org.apache.thrift.TBaseHelper.copyBinary(other.name); +-; +- } +- if (other.isSetColumns()) { +- List __this__columns = new ArrayList(other.columns.size()); +- for (CounterColumn other_element : other.columns) { +- __this__columns.add(new CounterColumn(other_element)); +- } +- this.columns = __this__columns; +- } +- } +- +- public CounterSuperColumn deepCopy() { +- return new CounterSuperColumn(this); +- } +- +- @Override +- public void clear() { +- this.name = null; +- this.columns = null; +- } +- +- public byte[] getName() { +- setName(org.apache.thrift.TBaseHelper.rightSize(name)); +- return name == null ? null : name.array(); +- } +- +- public ByteBuffer bufferForName() { +- return name; +- } +- +- public CounterSuperColumn setName(byte[] name) { +- setName(name == null ? (ByteBuffer)null : ByteBuffer.wrap(name)); +- return this; +- } +- +- public CounterSuperColumn setName(ByteBuffer name) { +- this.name = name; +- return this; +- } +- +- public void unsetName() { +- this.name = null; +- } +- +- /** Returns true if field name is set (has been assigned a value) and false otherwise */ +- public boolean isSetName() { +- return this.name != null; +- } +- +- public void setNameIsSet(boolean value) { +- if (!value) { +- this.name = null; +- } +- } +- +- public int getColumnsSize() { +- return (this.columns == null) ? 0 : this.columns.size(); +- } +- +- public java.util.Iterator getColumnsIterator() { +- return (this.columns == null) ? 
null : this.columns.iterator(); +- } +- +- public void addToColumns(CounterColumn elem) { +- if (this.columns == null) { +- this.columns = new ArrayList(); +- } +- this.columns.add(elem); +- } +- +- public List getColumns() { +- return this.columns; +- } +- +- public CounterSuperColumn setColumns(List columns) { +- this.columns = columns; +- return this; +- } +- +- public void unsetColumns() { +- this.columns = null; +- } +- +- /** Returns true if field columns is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumns() { +- return this.columns != null; +- } +- +- public void setColumnsIsSet(boolean value) { +- if (!value) { +- this.columns = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case NAME: +- if (value == null) { +- unsetName(); +- } else { +- setName((ByteBuffer)value); +- } +- break; +- +- case COLUMNS: +- if (value == null) { +- unsetColumns(); +- } else { +- setColumns((List)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case NAME: +- return getName(); +- +- case COLUMNS: +- return getColumns(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case NAME: +- return isSetName(); +- case COLUMNS: +- return isSetColumns(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof CounterSuperColumn) +- return this.equals((CounterSuperColumn)that); +- return false; +- } +- +- public boolean equals(CounterSuperColumn that) { +- if (that == null) +- return false; +- +- boolean this_present_name = true && this.isSetName(); +- boolean that_present_name = true 
&& that.isSetName(); +- if (this_present_name || that_present_name) { +- if (!(this_present_name && that_present_name)) +- return false; +- if (!this.name.equals(that.name)) +- return false; +- } +- +- boolean this_present_columns = true && this.isSetColumns(); +- boolean that_present_columns = true && that.isSetColumns(); +- if (this_present_columns || that_present_columns) { +- if (!(this_present_columns && that_present_columns)) +- return false; +- if (!this.columns.equals(that.columns)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_name = true && (isSetName()); +- builder.append(present_name); +- if (present_name) +- builder.append(name); +- +- boolean present_columns = true && (isSetColumns()); +- builder.append(present_columns); +- if (present_columns) +- builder.append(columns); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(CounterSuperColumn other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetName()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetColumns()).compareTo(other.isSetColumns()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumns()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.columns, other.columns); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) 
throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("CounterSuperColumn("); +- boolean first = true; +- +- sb.append("name:"); +- if (this.name == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.name, sb); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("columns:"); +- if (this.columns == null) { +- sb.append("null"); +- } else { +- sb.append(this.columns); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (name == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'name' was not present! Struct: " + toString()); +- } +- if (columns == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'columns' was not present! 
Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class CounterSuperColumnStandardSchemeFactory implements SchemeFactory { +- public CounterSuperColumnStandardScheme getScheme() { +- return new CounterSuperColumnStandardScheme(); +- } +- } +- +- private static class CounterSuperColumnStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, CounterSuperColumn struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // NAME +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.name = iprot.readBinary(); +- struct.setNameIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // COLUMNS +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list8 = iprot.readListBegin(); +- struct.columns = new ArrayList(_list8.size); +- for (int _i9 = 0; _i9 < _list8.size; ++_i9) +- { +- CounterColumn _elem10; +- _elem10 = new CounterColumn(); +- _elem10.read(iprot); 
+- struct.columns.add(_elem10); +- } +- iprot.readListEnd(); +- } +- struct.setColumnsIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, CounterSuperColumn struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.name != null) { +- oprot.writeFieldBegin(NAME_FIELD_DESC); +- oprot.writeBinary(struct.name); +- oprot.writeFieldEnd(); +- } +- if (struct.columns != null) { +- oprot.writeFieldBegin(COLUMNS_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.columns.size())); +- for (CounterColumn _iter11 : struct.columns) +- { +- _iter11.write(oprot); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class CounterSuperColumnTupleSchemeFactory implements SchemeFactory { +- public CounterSuperColumnTupleScheme getScheme() { +- return new CounterSuperColumnTupleScheme(); +- } +- } +- +- private static class CounterSuperColumnTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, CounterSuperColumn struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeBinary(struct.name); +- { +- oprot.writeI32(struct.columns.size()); +- for (CounterColumn _iter12 : struct.columns) +- { +- _iter12.write(oprot); +- } +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, CounterSuperColumn struct) throws 
org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.name = iprot.readBinary(); +- struct.setNameIsSet(true); +- { +- org.apache.thrift.protocol.TList _list13 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); +- struct.columns = new ArrayList(_list13.size); +- for (int _i14 = 0; _i14 < _list13.size; ++_i14) +- { +- CounterColumn _elem15; +- _elem15 = new CounterColumn(); +- _elem15.read(iprot); +- struct.columns.add(_elem15); +- } +- } +- struct.setColumnsIsSet(true); +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/CqlMetadata.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/CqlMetadata.java +deleted file mode 100644 +index e2dcfa3..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/CqlMetadata.java ++++ /dev/null +@@ -1,817 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. 
+- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-public class CqlMetadata implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CqlMetadata"); +- +- private static final org.apache.thrift.protocol.TField NAME_TYPES_FIELD_DESC = new org.apache.thrift.protocol.TField("name_types", org.apache.thrift.protocol.TType.MAP, (short)1); +- private static final org.apache.thrift.protocol.TField VALUE_TYPES_FIELD_DESC = new org.apache.thrift.protocol.TField("value_types", org.apache.thrift.protocol.TType.MAP, (short)2); +- private static final org.apache.thrift.protocol.TField DEFAULT_NAME_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("default_name_type", org.apache.thrift.protocol.TType.STRING, (short)3); +- private static final org.apache.thrift.protocol.TField DEFAULT_VALUE_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("default_value_type", org.apache.thrift.protocol.TType.STRING, (short)4); +- +- private static 
final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new CqlMetadataStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new CqlMetadataTupleSchemeFactory()); +- } +- +- public Map name_types; // required +- public Map value_types; // required +- public String default_name_type; // required +- public String default_value_type; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- NAME_TYPES((short)1, "name_types"), +- VALUE_TYPES((short)2, "value_types"), +- DEFAULT_NAME_TYPE((short)3, "default_name_type"), +- DEFAULT_VALUE_TYPE((short)4, "default_value_type"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // NAME_TYPES +- return NAME_TYPES; +- case 2: // VALUE_TYPES +- return VALUE_TYPES; +- case 3: // DEFAULT_NAME_TYPE +- return DEFAULT_NAME_TYPE; +- case 4: // DEFAULT_VALUE_TYPE +- return DEFAULT_VALUE_TYPE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.NAME_TYPES, new org.apache.thrift.meta_data.FieldMetaData("name_types", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true), +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); +- tmpMap.put(_Fields.VALUE_TYPES, new org.apache.thrift.meta_data.FieldMetaData("value_types", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true), +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); +- tmpMap.put(_Fields.DEFAULT_NAME_TYPE, new org.apache.thrift.meta_data.FieldMetaData("default_name_type", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.DEFAULT_VALUE_TYPE, new org.apache.thrift.meta_data.FieldMetaData("default_value_type", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CqlMetadata.class, metaDataMap); +- } +- +- public CqlMetadata() { +- } +- +- public CqlMetadata( +- Map name_types, +- Map value_types, +- String default_name_type, +- String default_value_type) +- { +- this(); +- this.name_types = name_types; +- this.value_types = value_types; +- this.default_name_type = default_name_type; +- this.default_value_type = default_value_type; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public CqlMetadata(CqlMetadata other) { +- if (other.isSetName_types()) { +- Map __this__name_types = new HashMap(other.name_types); +- this.name_types = __this__name_types; +- } +- if (other.isSetValue_types()) { +- Map __this__value_types = new HashMap(other.value_types); +- this.value_types = __this__value_types; +- } +- if (other.isSetDefault_name_type()) { +- this.default_name_type = other.default_name_type; +- } +- if (other.isSetDefault_value_type()) { +- this.default_value_type = other.default_value_type; +- } +- } +- +- public CqlMetadata deepCopy() { +- return new CqlMetadata(this); +- } +- +- @Override +- public void clear() { +- this.name_types = null; +- this.value_types = null; +- this.default_name_type = null; +- this.default_value_type = null; +- } +- +- public int getName_typesSize() { +- return (this.name_types == null) ? 
0 : this.name_types.size(); +- } +- +- public void putToName_types(ByteBuffer key, String val) { +- if (this.name_types == null) { +- this.name_types = new HashMap(); +- } +- this.name_types.put(key, val); +- } +- +- public Map getName_types() { +- return this.name_types; +- } +- +- public CqlMetadata setName_types(Map name_types) { +- this.name_types = name_types; +- return this; +- } +- +- public void unsetName_types() { +- this.name_types = null; +- } +- +- /** Returns true if field name_types is set (has been assigned a value) and false otherwise */ +- public boolean isSetName_types() { +- return this.name_types != null; +- } +- +- public void setName_typesIsSet(boolean value) { +- if (!value) { +- this.name_types = null; +- } +- } +- +- public int getValue_typesSize() { +- return (this.value_types == null) ? 0 : this.value_types.size(); +- } +- +- public void putToValue_types(ByteBuffer key, String val) { +- if (this.value_types == null) { +- this.value_types = new HashMap(); +- } +- this.value_types.put(key, val); +- } +- +- public Map getValue_types() { +- return this.value_types; +- } +- +- public CqlMetadata setValue_types(Map value_types) { +- this.value_types = value_types; +- return this; +- } +- +- public void unsetValue_types() { +- this.value_types = null; +- } +- +- /** Returns true if field value_types is set (has been assigned a value) and false otherwise */ +- public boolean isSetValue_types() { +- return this.value_types != null; +- } +- +- public void setValue_typesIsSet(boolean value) { +- if (!value) { +- this.value_types = null; +- } +- } +- +- public String getDefault_name_type() { +- return this.default_name_type; +- } +- +- public CqlMetadata setDefault_name_type(String default_name_type) { +- this.default_name_type = default_name_type; +- return this; +- } +- +- public void unsetDefault_name_type() { +- this.default_name_type = null; +- } +- +- /** Returns true if field default_name_type is set (has been assigned a value) and false 
otherwise */ +- public boolean isSetDefault_name_type() { +- return this.default_name_type != null; +- } +- +- public void setDefault_name_typeIsSet(boolean value) { +- if (!value) { +- this.default_name_type = null; +- } +- } +- +- public String getDefault_value_type() { +- return this.default_value_type; +- } +- +- public CqlMetadata setDefault_value_type(String default_value_type) { +- this.default_value_type = default_value_type; +- return this; +- } +- +- public void unsetDefault_value_type() { +- this.default_value_type = null; +- } +- +- /** Returns true if field default_value_type is set (has been assigned a value) and false otherwise */ +- public boolean isSetDefault_value_type() { +- return this.default_value_type != null; +- } +- +- public void setDefault_value_typeIsSet(boolean value) { +- if (!value) { +- this.default_value_type = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case NAME_TYPES: +- if (value == null) { +- unsetName_types(); +- } else { +- setName_types((Map)value); +- } +- break; +- +- case VALUE_TYPES: +- if (value == null) { +- unsetValue_types(); +- } else { +- setValue_types((Map)value); +- } +- break; +- +- case DEFAULT_NAME_TYPE: +- if (value == null) { +- unsetDefault_name_type(); +- } else { +- setDefault_name_type((String)value); +- } +- break; +- +- case DEFAULT_VALUE_TYPE: +- if (value == null) { +- unsetDefault_value_type(); +- } else { +- setDefault_value_type((String)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case NAME_TYPES: +- return getName_types(); +- +- case VALUE_TYPES: +- return getValue_types(); +- +- case DEFAULT_NAME_TYPE: +- return getDefault_name_type(); +- +- case DEFAULT_VALUE_TYPE: +- return getDefault_value_type(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public 
boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case NAME_TYPES: +- return isSetName_types(); +- case VALUE_TYPES: +- return isSetValue_types(); +- case DEFAULT_NAME_TYPE: +- return isSetDefault_name_type(); +- case DEFAULT_VALUE_TYPE: +- return isSetDefault_value_type(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof CqlMetadata) +- return this.equals((CqlMetadata)that); +- return false; +- } +- +- public boolean equals(CqlMetadata that) { +- if (that == null) +- return false; +- +- boolean this_present_name_types = true && this.isSetName_types(); +- boolean that_present_name_types = true && that.isSetName_types(); +- if (this_present_name_types || that_present_name_types) { +- if (!(this_present_name_types && that_present_name_types)) +- return false; +- if (!this.name_types.equals(that.name_types)) +- return false; +- } +- +- boolean this_present_value_types = true && this.isSetValue_types(); +- boolean that_present_value_types = true && that.isSetValue_types(); +- if (this_present_value_types || that_present_value_types) { +- if (!(this_present_value_types && that_present_value_types)) +- return false; +- if (!this.value_types.equals(that.value_types)) +- return false; +- } +- +- boolean this_present_default_name_type = true && this.isSetDefault_name_type(); +- boolean that_present_default_name_type = true && that.isSetDefault_name_type(); +- if (this_present_default_name_type || that_present_default_name_type) { +- if (!(this_present_default_name_type && that_present_default_name_type)) +- return false; +- if (!this.default_name_type.equals(that.default_name_type)) +- return false; +- } +- +- boolean this_present_default_value_type = true && this.isSetDefault_value_type(); +- boolean that_present_default_value_type = true && that.isSetDefault_value_type(); +- if 
(this_present_default_value_type || that_present_default_value_type) { +- if (!(this_present_default_value_type && that_present_default_value_type)) +- return false; +- if (!this.default_value_type.equals(that.default_value_type)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_name_types = true && (isSetName_types()); +- builder.append(present_name_types); +- if (present_name_types) +- builder.append(name_types); +- +- boolean present_value_types = true && (isSetValue_types()); +- builder.append(present_value_types); +- if (present_value_types) +- builder.append(value_types); +- +- boolean present_default_name_type = true && (isSetDefault_name_type()); +- builder.append(present_default_name_type); +- if (present_default_name_type) +- builder.append(default_name_type); +- +- boolean present_default_value_type = true && (isSetDefault_value_type()); +- builder.append(present_default_value_type); +- if (present_default_value_type) +- builder.append(default_value_type); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(CqlMetadata other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetName_types()).compareTo(other.isSetName_types()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetName_types()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name_types, other.name_types); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetValue_types()).compareTo(other.isSetValue_types()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetValue_types()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.value_types, other.value_types); +- if (lastComparison != 
0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetDefault_name_type()).compareTo(other.isSetDefault_name_type()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetDefault_name_type()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.default_name_type, other.default_name_type); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetDefault_value_type()).compareTo(other.isSetDefault_value_type()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetDefault_value_type()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.default_value_type, other.default_value_type); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("CqlMetadata("); +- boolean first = true; +- +- sb.append("name_types:"); +- if (this.name_types == null) { +- sb.append("null"); +- } else { +- sb.append(this.name_types); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("value_types:"); +- if (this.value_types == null) { +- sb.append("null"); +- } else { +- sb.append(this.value_types); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("default_name_type:"); +- if (this.default_name_type == null) { +- sb.append("null"); +- } else { +- sb.append(this.default_name_type); +- } +- first = false; +- if (!first) sb.append(", "); +- 
sb.append("default_value_type:"); +- if (this.default_value_type == null) { +- sb.append("null"); +- } else { +- sb.append(this.default_value_type); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (name_types == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'name_types' was not present! Struct: " + toString()); +- } +- if (value_types == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'value_types' was not present! Struct: " + toString()); +- } +- if (default_name_type == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'default_name_type' was not present! Struct: " + toString()); +- } +- if (default_value_type == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'default_value_type' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class CqlMetadataStandardSchemeFactory implements SchemeFactory { +- public CqlMetadataStandardScheme getScheme() { +- return new CqlMetadataStandardScheme(); +- } +- } +- +- private static class CqlMetadataStandardScheme extends StandardScheme { +- +- public void 
read(org.apache.thrift.protocol.TProtocol iprot, CqlMetadata struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // NAME_TYPES +- if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { +- { +- org.apache.thrift.protocol.TMap _map172 = iprot.readMapBegin(); +- struct.name_types = new HashMap(2*_map172.size); +- for (int _i173 = 0; _i173 < _map172.size; ++_i173) +- { +- ByteBuffer _key174; +- String _val175; +- _key174 = iprot.readBinary(); +- _val175 = iprot.readString(); +- struct.name_types.put(_key174, _val175); +- } +- iprot.readMapEnd(); +- } +- struct.setName_typesIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // VALUE_TYPES +- if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { +- { +- org.apache.thrift.protocol.TMap _map176 = iprot.readMapBegin(); +- struct.value_types = new HashMap(2*_map176.size); +- for (int _i177 = 0; _i177 < _map176.size; ++_i177) +- { +- ByteBuffer _key178; +- String _val179; +- _key178 = iprot.readBinary(); +- _val179 = iprot.readString(); +- struct.value_types.put(_key178, _val179); +- } +- iprot.readMapEnd(); +- } +- struct.setValue_typesIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // DEFAULT_NAME_TYPE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.default_name_type = iprot.readString(); +- struct.setDefault_name_typeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // DEFAULT_VALUE_TYPE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.default_value_type = 
iprot.readString(); +- struct.setDefault_value_typeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, CqlMetadata struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.name_types != null) { +- oprot.writeFieldBegin(NAME_TYPES_FIELD_DESC); +- { +- oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.name_types.size())); +- for (Map.Entry _iter180 : struct.name_types.entrySet()) +- { +- oprot.writeBinary(_iter180.getKey()); +- oprot.writeString(_iter180.getValue()); +- } +- oprot.writeMapEnd(); +- } +- oprot.writeFieldEnd(); +- } +- if (struct.value_types != null) { +- oprot.writeFieldBegin(VALUE_TYPES_FIELD_DESC); +- { +- oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.value_types.size())); +- for (Map.Entry _iter181 : struct.value_types.entrySet()) +- { +- oprot.writeBinary(_iter181.getKey()); +- oprot.writeString(_iter181.getValue()); +- } +- oprot.writeMapEnd(); +- } +- oprot.writeFieldEnd(); +- } +- if (struct.default_name_type != null) { +- oprot.writeFieldBegin(DEFAULT_NAME_TYPE_FIELD_DESC); +- oprot.writeString(struct.default_name_type); +- oprot.writeFieldEnd(); +- } +- if (struct.default_value_type != null) { +- oprot.writeFieldBegin(DEFAULT_VALUE_TYPE_FIELD_DESC); +- oprot.writeString(struct.default_value_type); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } 
+- +- } +- +- private static class CqlMetadataTupleSchemeFactory implements SchemeFactory { +- public CqlMetadataTupleScheme getScheme() { +- return new CqlMetadataTupleScheme(); +- } +- } +- +- private static class CqlMetadataTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, CqlMetadata struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- { +- oprot.writeI32(struct.name_types.size()); +- for (Map.Entry _iter182 : struct.name_types.entrySet()) +- { +- oprot.writeBinary(_iter182.getKey()); +- oprot.writeString(_iter182.getValue()); +- } +- } +- { +- oprot.writeI32(struct.value_types.size()); +- for (Map.Entry _iter183 : struct.value_types.entrySet()) +- { +- oprot.writeBinary(_iter183.getKey()); +- oprot.writeString(_iter183.getValue()); +- } +- } +- oprot.writeString(struct.default_name_type); +- oprot.writeString(struct.default_value_type); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, CqlMetadata struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- { +- org.apache.thrift.protocol.TMap _map184 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); +- struct.name_types = new HashMap(2*_map184.size); +- for (int _i185 = 0; _i185 < _map184.size; ++_i185) +- { +- ByteBuffer _key186; +- String _val187; +- _key186 = iprot.readBinary(); +- _val187 = iprot.readString(); +- struct.name_types.put(_key186, _val187); +- } +- } +- struct.setName_typesIsSet(true); +- { +- org.apache.thrift.protocol.TMap _map188 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); +- struct.value_types = new HashMap(2*_map188.size); +- for (int _i189 = 0; _i189 < _map188.size; ++_i189) +- { +- ByteBuffer _key190; +- String _val191; +- _key190 
= iprot.readBinary(); +- _val191 = iprot.readString(); +- struct.value_types.put(_key190, _val191); +- } +- } +- struct.setValue_typesIsSet(true); +- struct.default_name_type = iprot.readString(); +- struct.setDefault_name_typeIsSet(true); +- struct.default_value_type = iprot.readString(); +- struct.setDefault_value_typeIsSet(true); +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/CqlPreparedResult.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/CqlPreparedResult.java +deleted file mode 100644 +index b720a2d..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/CqlPreparedResult.java ++++ /dev/null +@@ -1,821 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. 
+- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-public class CqlPreparedResult implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CqlPreparedResult"); +- +- private static final org.apache.thrift.protocol.TField ITEM_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("itemId", org.apache.thrift.protocol.TType.I32, (short)1); +- private static final org.apache.thrift.protocol.TField COUNT_FIELD_DESC = new org.apache.thrift.protocol.TField("count", org.apache.thrift.protocol.TType.I32, (short)2); +- private static final org.apache.thrift.protocol.TField VARIABLE_TYPES_FIELD_DESC = new org.apache.thrift.protocol.TField("variable_types", org.apache.thrift.protocol.TType.LIST, (short)3); +- private static final org.apache.thrift.protocol.TField VARIABLE_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("variable_names", org.apache.thrift.protocol.TType.LIST, (short)4); +- +- private static final Map, 
SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new CqlPreparedResultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new CqlPreparedResultTupleSchemeFactory()); +- } +- +- public int itemId; // required +- public int count; // required +- public List variable_types; // optional +- public List variable_names; // optional +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- ITEM_ID((short)1, "itemId"), +- COUNT((short)2, "count"), +- VARIABLE_TYPES((short)3, "variable_types"), +- VARIABLE_NAMES((short)4, "variable_names"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // ITEM_ID +- return ITEM_ID; +- case 2: // COUNT +- return COUNT; +- case 3: // VARIABLE_TYPES +- return VARIABLE_TYPES; +- case 4: // VARIABLE_NAMES +- return VARIABLE_NAMES; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private static final int __ITEMID_ISSET_ID = 0; +- private static final int __COUNT_ISSET_ID = 1; +- private byte __isset_bitfield = 0; +- private _Fields optionals[] = {_Fields.VARIABLE_TYPES,_Fields.VARIABLE_NAMES}; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.ITEM_ID, new org.apache.thrift.meta_data.FieldMetaData("itemId", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- tmpMap.put(_Fields.COUNT, new org.apache.thrift.meta_data.FieldMetaData("count", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- tmpMap.put(_Fields.VARIABLE_TYPES, new org.apache.thrift.meta_data.FieldMetaData("variable_types", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); +- tmpMap.put(_Fields.VARIABLE_NAMES, new org.apache.thrift.meta_data.FieldMetaData("variable_names", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CqlPreparedResult.class, metaDataMap); +- } +- +- public CqlPreparedResult() { +- } +- +- public CqlPreparedResult( +- int itemId, +- int count) +- { +- this(); +- this.itemId = itemId; +- setItemIdIsSet(true); +- this.count = count; +- setCountIsSet(true); +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public CqlPreparedResult(CqlPreparedResult other) { +- __isset_bitfield = other.__isset_bitfield; +- this.itemId = other.itemId; +- this.count = other.count; +- if (other.isSetVariable_types()) { +- List __this__variable_types = new ArrayList(other.variable_types); +- this.variable_types = __this__variable_types; +- } +- if (other.isSetVariable_names()) { +- List __this__variable_names = new ArrayList(other.variable_names); +- this.variable_names = __this__variable_names; +- } +- } +- +- public CqlPreparedResult deepCopy() { +- return new CqlPreparedResult(this); +- } +- +- @Override +- public void clear() { +- setItemIdIsSet(false); +- this.itemId = 0; +- setCountIsSet(false); +- this.count = 0; +- this.variable_types = null; +- this.variable_names = null; +- } +- +- public int getItemId() { +- return this.itemId; +- } +- +- public CqlPreparedResult setItemId(int itemId) { +- this.itemId = itemId; +- setItemIdIsSet(true); +- return this; +- } +- +- public void unsetItemId() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ITEMID_ISSET_ID); +- } +- +- /** Returns true if field itemId is set (has been assigned a value) and false otherwise */ +- public boolean isSetItemId() { +- return EncodingUtils.testBit(__isset_bitfield, __ITEMID_ISSET_ID); +- } +- +- public void setItemIdIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ITEMID_ISSET_ID, value); +- } +- +- public int getCount() { +- return 
this.count; +- } +- +- public CqlPreparedResult setCount(int count) { +- this.count = count; +- setCountIsSet(true); +- return this; +- } +- +- public void unsetCount() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __COUNT_ISSET_ID); +- } +- +- /** Returns true if field count is set (has been assigned a value) and false otherwise */ +- public boolean isSetCount() { +- return EncodingUtils.testBit(__isset_bitfield, __COUNT_ISSET_ID); +- } +- +- public void setCountIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __COUNT_ISSET_ID, value); +- } +- +- public int getVariable_typesSize() { +- return (this.variable_types == null) ? 0 : this.variable_types.size(); +- } +- +- public java.util.Iterator getVariable_typesIterator() { +- return (this.variable_types == null) ? null : this.variable_types.iterator(); +- } +- +- public void addToVariable_types(String elem) { +- if (this.variable_types == null) { +- this.variable_types = new ArrayList(); +- } +- this.variable_types.add(elem); +- } +- +- public List getVariable_types() { +- return this.variable_types; +- } +- +- public CqlPreparedResult setVariable_types(List variable_types) { +- this.variable_types = variable_types; +- return this; +- } +- +- public void unsetVariable_types() { +- this.variable_types = null; +- } +- +- /** Returns true if field variable_types is set (has been assigned a value) and false otherwise */ +- public boolean isSetVariable_types() { +- return this.variable_types != null; +- } +- +- public void setVariable_typesIsSet(boolean value) { +- if (!value) { +- this.variable_types = null; +- } +- } +- +- public int getVariable_namesSize() { +- return (this.variable_names == null) ? 0 : this.variable_names.size(); +- } +- +- public java.util.Iterator getVariable_namesIterator() { +- return (this.variable_names == null) ? 
null : this.variable_names.iterator(); +- } +- +- public void addToVariable_names(String elem) { +- if (this.variable_names == null) { +- this.variable_names = new ArrayList(); +- } +- this.variable_names.add(elem); +- } +- +- public List getVariable_names() { +- return this.variable_names; +- } +- +- public CqlPreparedResult setVariable_names(List variable_names) { +- this.variable_names = variable_names; +- return this; +- } +- +- public void unsetVariable_names() { +- this.variable_names = null; +- } +- +- /** Returns true if field variable_names is set (has been assigned a value) and false otherwise */ +- public boolean isSetVariable_names() { +- return this.variable_names != null; +- } +- +- public void setVariable_namesIsSet(boolean value) { +- if (!value) { +- this.variable_names = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case ITEM_ID: +- if (value == null) { +- unsetItemId(); +- } else { +- setItemId((Integer)value); +- } +- break; +- +- case COUNT: +- if (value == null) { +- unsetCount(); +- } else { +- setCount((Integer)value); +- } +- break; +- +- case VARIABLE_TYPES: +- if (value == null) { +- unsetVariable_types(); +- } else { +- setVariable_types((List)value); +- } +- break; +- +- case VARIABLE_NAMES: +- if (value == null) { +- unsetVariable_names(); +- } else { +- setVariable_names((List)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case ITEM_ID: +- return Integer.valueOf(getItemId()); +- +- case COUNT: +- return Integer.valueOf(getCount()); +- +- case VARIABLE_TYPES: +- return getVariable_types(); +- +- case VARIABLE_NAMES: +- return getVariable_names(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } 
+- +- switch (field) { +- case ITEM_ID: +- return isSetItemId(); +- case COUNT: +- return isSetCount(); +- case VARIABLE_TYPES: +- return isSetVariable_types(); +- case VARIABLE_NAMES: +- return isSetVariable_names(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof CqlPreparedResult) +- return this.equals((CqlPreparedResult)that); +- return false; +- } +- +- public boolean equals(CqlPreparedResult that) { +- if (that == null) +- return false; +- +- boolean this_present_itemId = true; +- boolean that_present_itemId = true; +- if (this_present_itemId || that_present_itemId) { +- if (!(this_present_itemId && that_present_itemId)) +- return false; +- if (this.itemId != that.itemId) +- return false; +- } +- +- boolean this_present_count = true; +- boolean that_present_count = true; +- if (this_present_count || that_present_count) { +- if (!(this_present_count && that_present_count)) +- return false; +- if (this.count != that.count) +- return false; +- } +- +- boolean this_present_variable_types = true && this.isSetVariable_types(); +- boolean that_present_variable_types = true && that.isSetVariable_types(); +- if (this_present_variable_types || that_present_variable_types) { +- if (!(this_present_variable_types && that_present_variable_types)) +- return false; +- if (!this.variable_types.equals(that.variable_types)) +- return false; +- } +- +- boolean this_present_variable_names = true && this.isSetVariable_names(); +- boolean that_present_variable_names = true && that.isSetVariable_names(); +- if (this_present_variable_names || that_present_variable_names) { +- if (!(this_present_variable_names && that_present_variable_names)) +- return false; +- if (!this.variable_names.equals(that.variable_names)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean 
present_itemId = true; +- builder.append(present_itemId); +- if (present_itemId) +- builder.append(itemId); +- +- boolean present_count = true; +- builder.append(present_count); +- if (present_count) +- builder.append(count); +- +- boolean present_variable_types = true && (isSetVariable_types()); +- builder.append(present_variable_types); +- if (present_variable_types) +- builder.append(variable_types); +- +- boolean present_variable_names = true && (isSetVariable_names()); +- builder.append(present_variable_names); +- if (present_variable_names) +- builder.append(variable_names); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(CqlPreparedResult other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetItemId()).compareTo(other.isSetItemId()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetItemId()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.itemId, other.itemId); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetCount()).compareTo(other.isSetCount()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetCount()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.count, other.count); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetVariable_types()).compareTo(other.isSetVariable_types()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetVariable_types()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.variable_types, other.variable_types); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetVariable_names()).compareTo(other.isSetVariable_names()); +- if (lastComparison != 0) { +- return 
lastComparison; +- } +- if (isSetVariable_names()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.variable_names, other.variable_names); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("CqlPreparedResult("); +- boolean first = true; +- +- sb.append("itemId:"); +- sb.append(this.itemId); +- first = false; +- if (!first) sb.append(", "); +- sb.append("count:"); +- sb.append(this.count); +- first = false; +- if (isSetVariable_types()) { +- if (!first) sb.append(", "); +- sb.append("variable_types:"); +- if (this.variable_types == null) { +- sb.append("null"); +- } else { +- sb.append(this.variable_types); +- } +- first = false; +- } +- if (isSetVariable_names()) { +- if (!first) sb.append(", "); +- sb.append("variable_names:"); +- if (this.variable_names == null) { +- sb.append("null"); +- } else { +- sb.append(this.variable_names); +- } +- first = false; +- } +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // alas, we cannot check 'itemId' because it's a primitive and you chose the non-beans generator. +- // alas, we cannot check 'count' because it's a primitive and you chose the non-beans generator. 
+- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. +- __isset_bitfield = 0; +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class CqlPreparedResultStandardSchemeFactory implements SchemeFactory { +- public CqlPreparedResultStandardScheme getScheme() { +- return new CqlPreparedResultStandardScheme(); +- } +- } +- +- private static class CqlPreparedResultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, CqlPreparedResult struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // ITEM_ID +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.itemId = iprot.readI32(); +- struct.setItemIdIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // COUNT +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.count = iprot.readI32(); +- struct.setCountIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type); +- } +- break; +- case 3: // VARIABLE_TYPES +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list200 = iprot.readListBegin(); +- struct.variable_types = new ArrayList(_list200.size); +- for (int _i201 = 0; _i201 < _list200.size; ++_i201) +- { +- String _elem202; +- _elem202 = iprot.readString(); +- struct.variable_types.add(_elem202); +- } +- iprot.readListEnd(); +- } +- struct.setVariable_typesIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // VARIABLE_NAMES +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list203 = iprot.readListBegin(); +- struct.variable_names = new ArrayList(_list203.size); +- for (int _i204 = 0; _i204 < _list203.size; ++_i204) +- { +- String _elem205; +- _elem205 = iprot.readString(); +- struct.variable_names.add(_elem205); +- } +- iprot.readListEnd(); +- } +- struct.setVariable_namesIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- if (!struct.isSetItemId()) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'itemId' was not found in serialized data! Struct: " + toString()); +- } +- if (!struct.isSetCount()) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'count' was not found in serialized data! 
Struct: " + toString()); +- } +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, CqlPreparedResult struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- oprot.writeFieldBegin(ITEM_ID_FIELD_DESC); +- oprot.writeI32(struct.itemId); +- oprot.writeFieldEnd(); +- oprot.writeFieldBegin(COUNT_FIELD_DESC); +- oprot.writeI32(struct.count); +- oprot.writeFieldEnd(); +- if (struct.variable_types != null) { +- if (struct.isSetVariable_types()) { +- oprot.writeFieldBegin(VARIABLE_TYPES_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.variable_types.size())); +- for (String _iter206 : struct.variable_types) +- { +- oprot.writeString(_iter206); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- } +- if (struct.variable_names != null) { +- if (struct.isSetVariable_names()) { +- oprot.writeFieldBegin(VARIABLE_NAMES_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.variable_names.size())); +- for (String _iter207 : struct.variable_names) +- { +- oprot.writeString(_iter207); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class CqlPreparedResultTupleSchemeFactory implements SchemeFactory { +- public CqlPreparedResultTupleScheme getScheme() { +- return new CqlPreparedResultTupleScheme(); +- } +- } +- +- private static class CqlPreparedResultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, CqlPreparedResult struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeI32(struct.itemId); +- oprot.writeI32(struct.count); +- BitSet optionals = new BitSet(); +- if (struct.isSetVariable_types()) { 
+- optionals.set(0); +- } +- if (struct.isSetVariable_names()) { +- optionals.set(1); +- } +- oprot.writeBitSet(optionals, 2); +- if (struct.isSetVariable_types()) { +- { +- oprot.writeI32(struct.variable_types.size()); +- for (String _iter208 : struct.variable_types) +- { +- oprot.writeString(_iter208); +- } +- } +- } +- if (struct.isSetVariable_names()) { +- { +- oprot.writeI32(struct.variable_names.size()); +- for (String _iter209 : struct.variable_names) +- { +- oprot.writeString(_iter209); +- } +- } +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, CqlPreparedResult struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.itemId = iprot.readI32(); +- struct.setItemIdIsSet(true); +- struct.count = iprot.readI32(); +- struct.setCountIsSet(true); +- BitSet incoming = iprot.readBitSet(2); +- if (incoming.get(0)) { +- { +- org.apache.thrift.protocol.TList _list210 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); +- struct.variable_types = new ArrayList(_list210.size); +- for (int _i211 = 0; _i211 < _list210.size; ++_i211) +- { +- String _elem212; +- _elem212 = iprot.readString(); +- struct.variable_types.add(_elem212); +- } +- } +- struct.setVariable_typesIsSet(true); +- } +- if (incoming.get(1)) { +- { +- org.apache.thrift.protocol.TList _list213 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); +- struct.variable_names = new ArrayList(_list213.size); +- for (int _i214 = 0; _i214 < _list213.size; ++_i214) +- { +- String _elem215; +- _elem215 = iprot.readString(); +- struct.variable_names.add(_elem215); +- } +- } +- struct.setVariable_namesIsSet(true); +- } +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/CqlResult.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/CqlResult.java +deleted file mode 100644 +index 1cdbe07..0000000 +--- 
a/interface/thrift/gen-java/org/apache/cassandra/thrift/CqlResult.java ++++ /dev/null +@@ -1,807 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. 
+- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-public class CqlResult implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CqlResult"); +- +- private static final org.apache.thrift.protocol.TField TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("type", org.apache.thrift.protocol.TType.I32, (short)1); +- private static final org.apache.thrift.protocol.TField ROWS_FIELD_DESC = new org.apache.thrift.protocol.TField("rows", org.apache.thrift.protocol.TType.LIST, (short)2); +- private static final org.apache.thrift.protocol.TField NUM_FIELD_DESC = new org.apache.thrift.protocol.TField("num", org.apache.thrift.protocol.TType.I32, (short)3); +- private static final org.apache.thrift.protocol.TField SCHEMA_FIELD_DESC = new org.apache.thrift.protocol.TField("schema", org.apache.thrift.protocol.TType.STRUCT, (short)4); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- 
schemes.put(StandardScheme.class, new CqlResultStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new CqlResultTupleSchemeFactory()); +- } +- +- /** +- * +- * @see CqlResultType +- */ +- public CqlResultType type; // required +- public List rows; // optional +- public int num; // optional +- public CqlMetadata schema; // optional +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- /** +- * +- * @see CqlResultType +- */ +- TYPE((short)1, "type"), +- ROWS((short)2, "rows"), +- NUM((short)3, "num"), +- SCHEMA((short)4, "schema"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // TYPE +- return TYPE; +- case 2: // ROWS +- return ROWS; +- case 3: // NUM +- return NUM; +- case 4: // SCHEMA +- return SCHEMA; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private static final int __NUM_ISSET_ID = 0; +- private byte __isset_bitfield = 0; +- private _Fields optionals[] = {_Fields.ROWS,_Fields.NUM,_Fields.SCHEMA}; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.TYPE, new org.apache.thrift.meta_data.FieldMetaData("type", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, CqlResultType.class))); +- tmpMap.put(_Fields.ROWS, new org.apache.thrift.meta_data.FieldMetaData("rows", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CqlRow.class)))); +- tmpMap.put(_Fields.NUM, new org.apache.thrift.meta_data.FieldMetaData("num", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- tmpMap.put(_Fields.SCHEMA, new org.apache.thrift.meta_data.FieldMetaData("schema", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CqlMetadata.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CqlResult.class, metaDataMap); +- } +- +- public CqlResult() { +- } +- +- public CqlResult( +- CqlResultType type) +- { +- this(); +- this.type = type; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public CqlResult(CqlResult other) { +- __isset_bitfield = other.__isset_bitfield; +- if (other.isSetType()) { +- this.type = other.type; +- } +- if (other.isSetRows()) { +- List __this__rows = new ArrayList(other.rows.size()); +- for (CqlRow other_element : other.rows) { +- __this__rows.add(new CqlRow(other_element)); +- } +- this.rows = __this__rows; +- } +- this.num = other.num; +- if (other.isSetSchema()) { +- this.schema = new CqlMetadata(other.schema); +- } +- } +- +- public CqlResult deepCopy() { +- return new CqlResult(this); +- } +- +- @Override +- public void clear() { +- this.type = null; +- this.rows = null; +- setNumIsSet(false); +- this.num = 0; +- this.schema = null; +- } +- +- /** +- * +- * @see CqlResultType +- */ +- public CqlResultType getType() { +- return this.type; +- } +- +- /** +- * +- * @see CqlResultType +- */ +- public CqlResult setType(CqlResultType type) { +- this.type = type; +- return this; +- } +- +- public void unsetType() { +- this.type = null; +- } +- +- /** Returns true if field type is set (has been assigned a value) and false otherwise */ +- public boolean isSetType() { +- return this.type != null; +- } +- +- public void setTypeIsSet(boolean value) { +- if (!value) { +- this.type = null; +- } +- } +- +- public int getRowsSize() { +- return (this.rows == null) ? 0 : this.rows.size(); +- } +- +- public java.util.Iterator getRowsIterator() { +- return (this.rows == null) ? 
null : this.rows.iterator(); +- } +- +- public void addToRows(CqlRow elem) { +- if (this.rows == null) { +- this.rows = new ArrayList(); +- } +- this.rows.add(elem); +- } +- +- public List getRows() { +- return this.rows; +- } +- +- public CqlResult setRows(List rows) { +- this.rows = rows; +- return this; +- } +- +- public void unsetRows() { +- this.rows = null; +- } +- +- /** Returns true if field rows is set (has been assigned a value) and false otherwise */ +- public boolean isSetRows() { +- return this.rows != null; +- } +- +- public void setRowsIsSet(boolean value) { +- if (!value) { +- this.rows = null; +- } +- } +- +- public int getNum() { +- return this.num; +- } +- +- public CqlResult setNum(int num) { +- this.num = num; +- setNumIsSet(true); +- return this; +- } +- +- public void unsetNum() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUM_ISSET_ID); +- } +- +- /** Returns true if field num is set (has been assigned a value) and false otherwise */ +- public boolean isSetNum() { +- return EncodingUtils.testBit(__isset_bitfield, __NUM_ISSET_ID); +- } +- +- public void setNumIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUM_ISSET_ID, value); +- } +- +- public CqlMetadata getSchema() { +- return this.schema; +- } +- +- public CqlResult setSchema(CqlMetadata schema) { +- this.schema = schema; +- return this; +- } +- +- public void unsetSchema() { +- this.schema = null; +- } +- +- /** Returns true if field schema is set (has been assigned a value) and false otherwise */ +- public boolean isSetSchema() { +- return this.schema != null; +- } +- +- public void setSchemaIsSet(boolean value) { +- if (!value) { +- this.schema = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case TYPE: +- if (value == null) { +- unsetType(); +- } else { +- setType((CqlResultType)value); +- } +- break; +- +- case ROWS: +- if (value == null) { +- unsetRows(); +- } else { 
+- setRows((List)value); +- } +- break; +- +- case NUM: +- if (value == null) { +- unsetNum(); +- } else { +- setNum((Integer)value); +- } +- break; +- +- case SCHEMA: +- if (value == null) { +- unsetSchema(); +- } else { +- setSchema((CqlMetadata)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case TYPE: +- return getType(); +- +- case ROWS: +- return getRows(); +- +- case NUM: +- return Integer.valueOf(getNum()); +- +- case SCHEMA: +- return getSchema(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case TYPE: +- return isSetType(); +- case ROWS: +- return isSetRows(); +- case NUM: +- return isSetNum(); +- case SCHEMA: +- return isSetSchema(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof CqlResult) +- return this.equals((CqlResult)that); +- return false; +- } +- +- public boolean equals(CqlResult that) { +- if (that == null) +- return false; +- +- boolean this_present_type = true && this.isSetType(); +- boolean that_present_type = true && that.isSetType(); +- if (this_present_type || that_present_type) { +- if (!(this_present_type && that_present_type)) +- return false; +- if (!this.type.equals(that.type)) +- return false; +- } +- +- boolean this_present_rows = true && this.isSetRows(); +- boolean that_present_rows = true && that.isSetRows(); +- if (this_present_rows || that_present_rows) { +- if (!(this_present_rows && that_present_rows)) +- return false; +- if (!this.rows.equals(that.rows)) +- return false; +- } +- +- boolean this_present_num = true && this.isSetNum(); +- boolean that_present_num = true && that.isSetNum(); +- if 
(this_present_num || that_present_num) { +- if (!(this_present_num && that_present_num)) +- return false; +- if (this.num != that.num) +- return false; +- } +- +- boolean this_present_schema = true && this.isSetSchema(); +- boolean that_present_schema = true && that.isSetSchema(); +- if (this_present_schema || that_present_schema) { +- if (!(this_present_schema && that_present_schema)) +- return false; +- if (!this.schema.equals(that.schema)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_type = true && (isSetType()); +- builder.append(present_type); +- if (present_type) +- builder.append(type.getValue()); +- +- boolean present_rows = true && (isSetRows()); +- builder.append(present_rows); +- if (present_rows) +- builder.append(rows); +- +- boolean present_num = true && (isSetNum()); +- builder.append(present_num); +- if (present_num) +- builder.append(num); +- +- boolean present_schema = true && (isSetSchema()); +- builder.append(present_schema); +- if (present_schema) +- builder.append(schema); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(CqlResult other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetType()).compareTo(other.isSetType()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetType()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.type, other.type); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetRows()).compareTo(other.isSetRows()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetRows()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rows, other.rows); +- if (lastComparison != 0) { +- return lastComparison; +- 
} +- } +- lastComparison = Boolean.valueOf(isSetNum()).compareTo(other.isSetNum()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetNum()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.num, other.num); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetSchema()).compareTo(other.isSetSchema()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSchema()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.schema, other.schema); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("CqlResult("); +- boolean first = true; +- +- sb.append("type:"); +- if (this.type == null) { +- sb.append("null"); +- } else { +- sb.append(this.type); +- } +- first = false; +- if (isSetRows()) { +- if (!first) sb.append(", "); +- sb.append("rows:"); +- if (this.rows == null) { +- sb.append("null"); +- } else { +- sb.append(this.rows); +- } +- first = false; +- } +- if (isSetNum()) { +- if (!first) sb.append(", "); +- sb.append("num:"); +- sb.append(this.num); +- first = false; +- } +- if (isSetSchema()) { +- if (!first) sb.append(", "); +- sb.append("schema:"); +- if (this.schema == null) { +- sb.append("null"); +- } else { +- sb.append(this.schema); +- } +- first = false; +- } +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // 
check for required fields +- if (type == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'type' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- if (schema != null) { +- schema.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. +- __isset_bitfield = 0; +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class CqlResultStandardSchemeFactory implements SchemeFactory { +- public CqlResultStandardScheme getScheme() { +- return new CqlResultStandardScheme(); +- } +- } +- +- private static class CqlResultStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, CqlResult struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // TYPE +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.type = CqlResultType.findByValue(iprot.readI32()); +- struct.setTypeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // 
ROWS +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list192 = iprot.readListBegin(); +- struct.rows = new ArrayList(_list192.size); +- for (int _i193 = 0; _i193 < _list192.size; ++_i193) +- { +- CqlRow _elem194; +- _elem194 = new CqlRow(); +- _elem194.read(iprot); +- struct.rows.add(_elem194); +- } +- iprot.readListEnd(); +- } +- struct.setRowsIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // NUM +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.num = iprot.readI32(); +- struct.setNumIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // SCHEMA +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.schema = new CqlMetadata(); +- struct.schema.read(iprot); +- struct.setSchemaIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, CqlResult struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.type != null) { +- oprot.writeFieldBegin(TYPE_FIELD_DESC); +- oprot.writeI32(struct.type.getValue()); +- oprot.writeFieldEnd(); +- } +- if (struct.rows != null) { +- if (struct.isSetRows()) { +- oprot.writeFieldBegin(ROWS_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.rows.size())); +- for (CqlRow _iter195 : struct.rows) +- { +- _iter195.write(oprot); +- } +- 
oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- } +- if (struct.isSetNum()) { +- oprot.writeFieldBegin(NUM_FIELD_DESC); +- oprot.writeI32(struct.num); +- oprot.writeFieldEnd(); +- } +- if (struct.schema != null) { +- if (struct.isSetSchema()) { +- oprot.writeFieldBegin(SCHEMA_FIELD_DESC); +- struct.schema.write(oprot); +- oprot.writeFieldEnd(); +- } +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class CqlResultTupleSchemeFactory implements SchemeFactory { +- public CqlResultTupleScheme getScheme() { +- return new CqlResultTupleScheme(); +- } +- } +- +- private static class CqlResultTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, CqlResult struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeI32(struct.type.getValue()); +- BitSet optionals = new BitSet(); +- if (struct.isSetRows()) { +- optionals.set(0); +- } +- if (struct.isSetNum()) { +- optionals.set(1); +- } +- if (struct.isSetSchema()) { +- optionals.set(2); +- } +- oprot.writeBitSet(optionals, 3); +- if (struct.isSetRows()) { +- { +- oprot.writeI32(struct.rows.size()); +- for (CqlRow _iter196 : struct.rows) +- { +- _iter196.write(oprot); +- } +- } +- } +- if (struct.isSetNum()) { +- oprot.writeI32(struct.num); +- } +- if (struct.isSetSchema()) { +- struct.schema.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, CqlResult struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.type = CqlResultType.findByValue(iprot.readI32()); +- struct.setTypeIsSet(true); +- BitSet incoming = iprot.readBitSet(3); +- if (incoming.get(0)) { +- { +- org.apache.thrift.protocol.TList _list197 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); +- struct.rows = new ArrayList(_list197.size); +- for (int _i198 = 
0; _i198 < _list197.size; ++_i198) +- { +- CqlRow _elem199; +- _elem199 = new CqlRow(); +- _elem199.read(iprot); +- struct.rows.add(_elem199); +- } +- } +- struct.setRowsIsSet(true); +- } +- if (incoming.get(1)) { +- struct.num = iprot.readI32(); +- struct.setNumIsSet(true); +- } +- if (incoming.get(2)) { +- struct.schema = new CqlMetadata(); +- struct.schema.read(iprot); +- struct.setSchemaIsSet(true); +- } +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/CqlResultType.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/CqlResultType.java +deleted file mode 100644 +index 2928f68..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/CqlResultType.java ++++ /dev/null +@@ -1,69 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. 
+- * +- */ +- +- +- +-import java.util.Map; +-import java.util.HashMap; +-import org.apache.thrift.TEnum; +- +-public enum CqlResultType implements org.apache.thrift.TEnum { +- ROWS(1), +- VOID(2), +- INT(3); +- +- private final int value; +- +- private CqlResultType(int value) { +- this.value = value; +- } +- +- /** +- * Get the integer value of this enum value, as defined in the Thrift IDL. +- */ +- public int getValue() { +- return value; +- } +- +- /** +- * Find a the enum type by its integer value, as defined in the Thrift IDL. +- * @return null if the value is not found. +- */ +- public static CqlResultType findByValue(int value) { +- switch (value) { +- case 1: +- return ROWS; +- case 2: +- return VOID; +- case 3: +- return INT; +- default: +- return null; +- } +- } +-} +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/CqlRow.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/CqlRow.java +deleted file mode 100644 +index 7487ed7..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/CqlRow.java ++++ /dev/null +@@ -1,584 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. 
You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. +- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * Row returned from a CQL query. +- * +- * This struct is used for both CQL2 and CQL3 queries. For CQL2, the partition key +- * is special-cased and is always returned. For CQL3, it is not special cased; +- * it will be included in the columns list if it was included in the SELECT and +- * the key field is always null. 
+- */ +-public class CqlRow implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CqlRow"); +- +- private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)2); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new CqlRowStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new CqlRowTupleSchemeFactory()); +- } +- +- public ByteBuffer key; // required +- public List columns; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- KEY((short)1, "key"), +- COLUMNS((short)2, "columns"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // KEY +- return KEY; +- case 2: // COLUMNS +- return COLUMNS; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. 
+- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.KEY, new org.apache.thrift.meta_data.FieldMetaData("key", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Column.class)))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CqlRow.class, metaDataMap); +- } +- +- public CqlRow() { +- } +- +- public CqlRow( +- ByteBuffer key, +- List columns) +- { +- this(); +- this.key = key; +- this.columns = columns; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public CqlRow(CqlRow other) { +- if (other.isSetKey()) { +- this.key = org.apache.thrift.TBaseHelper.copyBinary(other.key); +-; +- } +- if (other.isSetColumns()) { +- List __this__columns = new ArrayList(other.columns.size()); +- for (Column other_element : other.columns) { +- __this__columns.add(new Column(other_element)); +- } +- this.columns = __this__columns; +- } +- } +- +- public CqlRow deepCopy() { +- return new CqlRow(this); +- } +- +- @Override +- public void clear() { +- this.key = null; +- this.columns = null; +- } +- +- public byte[] getKey() { +- setKey(org.apache.thrift.TBaseHelper.rightSize(key)); +- return key == null ? null : key.array(); +- } +- +- public ByteBuffer bufferForKey() { +- return key; +- } +- +- public CqlRow setKey(byte[] key) { +- setKey(key == null ? (ByteBuffer)null : ByteBuffer.wrap(key)); +- return this; +- } +- +- public CqlRow setKey(ByteBuffer key) { +- this.key = key; +- return this; +- } +- +- public void unsetKey() { +- this.key = null; +- } +- +- /** Returns true if field key is set (has been assigned a value) and false otherwise */ +- public boolean isSetKey() { +- return this.key != null; +- } +- +- public void setKeyIsSet(boolean value) { +- if (!value) { +- this.key = null; +- } +- } +- +- public int getColumnsSize() { +- return (this.columns == null) ? 0 : this.columns.size(); +- } +- +- public java.util.Iterator getColumnsIterator() { +- return (this.columns == null) ? 
null : this.columns.iterator(); +- } +- +- public void addToColumns(Column elem) { +- if (this.columns == null) { +- this.columns = new ArrayList(); +- } +- this.columns.add(elem); +- } +- +- public List getColumns() { +- return this.columns; +- } +- +- public CqlRow setColumns(List columns) { +- this.columns = columns; +- return this; +- } +- +- public void unsetColumns() { +- this.columns = null; +- } +- +- /** Returns true if field columns is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumns() { +- return this.columns != null; +- } +- +- public void setColumnsIsSet(boolean value) { +- if (!value) { +- this.columns = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case KEY: +- if (value == null) { +- unsetKey(); +- } else { +- setKey((ByteBuffer)value); +- } +- break; +- +- case COLUMNS: +- if (value == null) { +- unsetColumns(); +- } else { +- setColumns((List)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case KEY: +- return getKey(); +- +- case COLUMNS: +- return getColumns(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case KEY: +- return isSetKey(); +- case COLUMNS: +- return isSetColumns(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof CqlRow) +- return this.equals((CqlRow)that); +- return false; +- } +- +- public boolean equals(CqlRow that) { +- if (that == null) +- return false; +- +- boolean this_present_key = true && this.isSetKey(); +- boolean that_present_key = true && that.isSetKey(); +- if (this_present_key || that_present_key) 
{ +- if (!(this_present_key && that_present_key)) +- return false; +- if (!this.key.equals(that.key)) +- return false; +- } +- +- boolean this_present_columns = true && this.isSetColumns(); +- boolean that_present_columns = true && that.isSetColumns(); +- if (this_present_columns || that_present_columns) { +- if (!(this_present_columns && that_present_columns)) +- return false; +- if (!this.columns.equals(that.columns)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_key = true && (isSetKey()); +- builder.append(present_key); +- if (present_key) +- builder.append(key); +- +- boolean present_columns = true && (isSetColumns()); +- builder.append(present_columns); +- if (present_columns) +- builder.append(columns); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(CqlRow other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetKey()).compareTo(other.isSetKey()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKey()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key, other.key); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetColumns()).compareTo(other.isSetColumns()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumns()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.columns, other.columns); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, 
this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("CqlRow("); +- boolean first = true; +- +- sb.append("key:"); +- if (this.key == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.key, sb); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("columns:"); +- if (this.columns == null) { +- sb.append("null"); +- } else { +- sb.append(this.columns); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (key == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'key' was not present! Struct: " + toString()); +- } +- if (columns == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'columns' was not present! 
Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class CqlRowStandardSchemeFactory implements SchemeFactory { +- public CqlRowStandardScheme getScheme() { +- return new CqlRowStandardScheme(); +- } +- } +- +- private static class CqlRowStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, CqlRow struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // KEY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.key = iprot.readBinary(); +- struct.setKeyIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // COLUMNS +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list164 = iprot.readListBegin(); +- struct.columns = new ArrayList(_list164.size); +- for (int _i165 = 0; _i165 < _list164.size; ++_i165) +- { +- Column _elem166; +- _elem166 = new Column(); +- _elem166.read(iprot); +- struct.columns.add(_elem166); +- } +- iprot.readListEnd(); 
+- } +- struct.setColumnsIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, CqlRow struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.key != null) { +- oprot.writeFieldBegin(KEY_FIELD_DESC); +- oprot.writeBinary(struct.key); +- oprot.writeFieldEnd(); +- } +- if (struct.columns != null) { +- oprot.writeFieldBegin(COLUMNS_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.columns.size())); +- for (Column _iter167 : struct.columns) +- { +- _iter167.write(oprot); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class CqlRowTupleSchemeFactory implements SchemeFactory { +- public CqlRowTupleScheme getScheme() { +- return new CqlRowTupleScheme(); +- } +- } +- +- private static class CqlRowTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, CqlRow struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeBinary(struct.key); +- { +- oprot.writeI32(struct.columns.size()); +- for (Column _iter168 : struct.columns) +- { +- _iter168.write(oprot); +- } +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, CqlRow struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.key = iprot.readBinary(); +- struct.setKeyIsSet(true); +- { +- 
org.apache.thrift.protocol.TList _list169 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); +- struct.columns = new ArrayList(_list169.size); +- for (int _i170 = 0; _i170 < _list169.size; ++_i170) +- { +- Column _elem171; +- _elem171 = new Column(); +- _elem171.read(iprot); +- struct.columns.add(_elem171); +- } +- } +- struct.setColumnsIsSet(true); +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/Deletion.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/Deletion.java +deleted file mode 100644 +index c98e449..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/Deletion.java ++++ /dev/null +@@ -1,645 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. 
+- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * Note that the timestamp is only optional in case of counter deletion. 
+- */ +-public class Deletion implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Deletion"); +- +- private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short)1); +- private static final org.apache.thrift.protocol.TField SUPER_COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("super_column", org.apache.thrift.protocol.TType.STRING, (short)2); +- private static final org.apache.thrift.protocol.TField PREDICATE_FIELD_DESC = new org.apache.thrift.protocol.TField("predicate", org.apache.thrift.protocol.TType.STRUCT, (short)3); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new DeletionStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new DeletionTupleSchemeFactory()); +- } +- +- public long timestamp; // optional +- public ByteBuffer super_column; // optional +- public SlicePredicate predicate; // optional +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- TIMESTAMP((short)1, "timestamp"), +- SUPER_COLUMN((short)2, "super_column"), +- PREDICATE((short)3, "predicate"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. 
+- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // TIMESTAMP +- return TIMESTAMP; +- case 2: // SUPER_COLUMN +- return SUPER_COLUMN; +- case 3: // PREDICATE +- return PREDICATE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private static final int __TIMESTAMP_ISSET_ID = 0; +- private byte __isset_bitfield = 0; +- private _Fields optionals[] = {_Fields.TIMESTAMP,_Fields.SUPER_COLUMN,_Fields.PREDICATE}; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); +- tmpMap.put(_Fields.SUPER_COLUMN, new org.apache.thrift.meta_data.FieldMetaData("super_column", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.PREDICATE, new org.apache.thrift.meta_data.FieldMetaData("predicate", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SlicePredicate.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Deletion.class, metaDataMap); +- } +- +- public Deletion() { +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public Deletion(Deletion other) { +- __isset_bitfield = other.__isset_bitfield; +- this.timestamp = other.timestamp; +- if (other.isSetSuper_column()) { +- this.super_column = org.apache.thrift.TBaseHelper.copyBinary(other.super_column); +-; +- } +- if (other.isSetPredicate()) { +- this.predicate = new SlicePredicate(other.predicate); +- } +- } +- +- public Deletion deepCopy() { +- return new Deletion(this); +- } +- +- @Override +- public void clear() { +- setTimestampIsSet(false); +- this.timestamp = 0; +- this.super_column = null; +- this.predicate = null; +- } +- +- public long getTimestamp() { +- return this.timestamp; +- } +- +- public Deletion setTimestamp(long timestamp) { +- this.timestamp = timestamp; +- setTimestampIsSet(true); +- return this; +- } +- +- public void unsetTimestamp() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID); +- } +- +- /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */ +- public boolean isSetTimestamp() { +- return EncodingUtils.testBit(__isset_bitfield, __TIMESTAMP_ISSET_ID); +- } +- +- public void setTimestampIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value); +- } +- +- public byte[] getSuper_column() { +- setSuper_column(org.apache.thrift.TBaseHelper.rightSize(super_column)); +- return super_column == 
null ? null : super_column.array(); +- } +- +- public ByteBuffer bufferForSuper_column() { +- return super_column; +- } +- +- public Deletion setSuper_column(byte[] super_column) { +- setSuper_column(super_column == null ? (ByteBuffer)null : ByteBuffer.wrap(super_column)); +- return this; +- } +- +- public Deletion setSuper_column(ByteBuffer super_column) { +- this.super_column = super_column; +- return this; +- } +- +- public void unsetSuper_column() { +- this.super_column = null; +- } +- +- /** Returns true if field super_column is set (has been assigned a value) and false otherwise */ +- public boolean isSetSuper_column() { +- return this.super_column != null; +- } +- +- public void setSuper_columnIsSet(boolean value) { +- if (!value) { +- this.super_column = null; +- } +- } +- +- public SlicePredicate getPredicate() { +- return this.predicate; +- } +- +- public Deletion setPredicate(SlicePredicate predicate) { +- this.predicate = predicate; +- return this; +- } +- +- public void unsetPredicate() { +- this.predicate = null; +- } +- +- /** Returns true if field predicate is set (has been assigned a value) and false otherwise */ +- public boolean isSetPredicate() { +- return this.predicate != null; +- } +- +- public void setPredicateIsSet(boolean value) { +- if (!value) { +- this.predicate = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case TIMESTAMP: +- if (value == null) { +- unsetTimestamp(); +- } else { +- setTimestamp((Long)value); +- } +- break; +- +- case SUPER_COLUMN: +- if (value == null) { +- unsetSuper_column(); +- } else { +- setSuper_column((ByteBuffer)value); +- } +- break; +- +- case PREDICATE: +- if (value == null) { +- unsetPredicate(); +- } else { +- setPredicate((SlicePredicate)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case TIMESTAMP: +- return Long.valueOf(getTimestamp()); +- +- case SUPER_COLUMN: +- return 
getSuper_column(); +- +- case PREDICATE: +- return getPredicate(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case TIMESTAMP: +- return isSetTimestamp(); +- case SUPER_COLUMN: +- return isSetSuper_column(); +- case PREDICATE: +- return isSetPredicate(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof Deletion) +- return this.equals((Deletion)that); +- return false; +- } +- +- public boolean equals(Deletion that) { +- if (that == null) +- return false; +- +- boolean this_present_timestamp = true && this.isSetTimestamp(); +- boolean that_present_timestamp = true && that.isSetTimestamp(); +- if (this_present_timestamp || that_present_timestamp) { +- if (!(this_present_timestamp && that_present_timestamp)) +- return false; +- if (this.timestamp != that.timestamp) +- return false; +- } +- +- boolean this_present_super_column = true && this.isSetSuper_column(); +- boolean that_present_super_column = true && that.isSetSuper_column(); +- if (this_present_super_column || that_present_super_column) { +- if (!(this_present_super_column && that_present_super_column)) +- return false; +- if (!this.super_column.equals(that.super_column)) +- return false; +- } +- +- boolean this_present_predicate = true && this.isSetPredicate(); +- boolean that_present_predicate = true && that.isSetPredicate(); +- if (this_present_predicate || that_present_predicate) { +- if (!(this_present_predicate && that_present_predicate)) +- return false; +- if (!this.predicate.equals(that.predicate)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new 
HashCodeBuilder(); +- +- boolean present_timestamp = true && (isSetTimestamp()); +- builder.append(present_timestamp); +- if (present_timestamp) +- builder.append(timestamp); +- +- boolean present_super_column = true && (isSetSuper_column()); +- builder.append(present_super_column); +- if (present_super_column) +- builder.append(super_column); +- +- boolean present_predicate = true && (isSetPredicate()); +- builder.append(present_predicate); +- if (present_predicate) +- builder.append(predicate); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(Deletion other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetTimestamp()).compareTo(other.isSetTimestamp()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetTimestamp()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.timestamp, other.timestamp); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetSuper_column()).compareTo(other.isSetSuper_column()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSuper_column()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.super_column, other.super_column); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetPredicate()).compareTo(other.isSetPredicate()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetPredicate()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.predicate, other.predicate); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { 
+- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("Deletion("); +- boolean first = true; +- +- if (isSetTimestamp()) { +- sb.append("timestamp:"); +- sb.append(this.timestamp); +- first = false; +- } +- if (isSetSuper_column()) { +- if (!first) sb.append(", "); +- sb.append("super_column:"); +- if (this.super_column == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.super_column, sb); +- } +- first = false; +- } +- if (isSetPredicate()) { +- if (!first) sb.append(", "); +- sb.append("predicate:"); +- if (this.predicate == null) { +- sb.append("null"); +- } else { +- sb.append(this.predicate); +- } +- first = false; +- } +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- if (predicate != null) { +- predicate.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+- __isset_bitfield = 0; +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class DeletionStandardSchemeFactory implements SchemeFactory { +- public DeletionStandardScheme getScheme() { +- return new DeletionStandardScheme(); +- } +- } +- +- private static class DeletionStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, Deletion struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // TIMESTAMP +- if (schemeField.type == org.apache.thrift.protocol.TType.I64) { +- struct.timestamp = iprot.readI64(); +- struct.setTimestampIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // SUPER_COLUMN +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.super_column = iprot.readBinary(); +- struct.setSuper_columnIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // PREDICATE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.predicate = new SlicePredicate(); +- struct.predicate.read(iprot); +- struct.setPredicateIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- 
+- public void write(org.apache.thrift.protocol.TProtocol oprot, Deletion struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.isSetTimestamp()) { +- oprot.writeFieldBegin(TIMESTAMP_FIELD_DESC); +- oprot.writeI64(struct.timestamp); +- oprot.writeFieldEnd(); +- } +- if (struct.super_column != null) { +- if (struct.isSetSuper_column()) { +- oprot.writeFieldBegin(SUPER_COLUMN_FIELD_DESC); +- oprot.writeBinary(struct.super_column); +- oprot.writeFieldEnd(); +- } +- } +- if (struct.predicate != null) { +- if (struct.isSetPredicate()) { +- oprot.writeFieldBegin(PREDICATE_FIELD_DESC); +- struct.predicate.write(oprot); +- oprot.writeFieldEnd(); +- } +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class DeletionTupleSchemeFactory implements SchemeFactory { +- public DeletionTupleScheme getScheme() { +- return new DeletionTupleScheme(); +- } +- } +- +- private static class DeletionTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, Deletion struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetTimestamp()) { +- optionals.set(0); +- } +- if (struct.isSetSuper_column()) { +- optionals.set(1); +- } +- if (struct.isSetPredicate()) { +- optionals.set(2); +- } +- oprot.writeBitSet(optionals, 3); +- if (struct.isSetTimestamp()) { +- oprot.writeI64(struct.timestamp); +- } +- if (struct.isSetSuper_column()) { +- oprot.writeBinary(struct.super_column); +- } +- if (struct.isSetPredicate()) { +- struct.predicate.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, Deletion struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(3); +- if (incoming.get(0)) { +- struct.timestamp = 
iprot.readI64(); +- struct.setTimestampIsSet(true); +- } +- if (incoming.get(1)) { +- struct.super_column = iprot.readBinary(); +- struct.setSuper_columnIsSet(true); +- } +- if (incoming.get(2)) { +- struct.predicate = new SlicePredicate(); +- struct.predicate.read(iprot); +- struct.setPredicateIsSet(true); +- } +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/EndpointDetails.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/EndpointDetails.java +deleted file mode 100644 +index 69fcf58..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/EndpointDetails.java ++++ /dev/null +@@ -1,630 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. 
+- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-public class EndpointDetails implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("EndpointDetails"); +- +- private static final org.apache.thrift.protocol.TField HOST_FIELD_DESC = new org.apache.thrift.protocol.TField("host", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField DATACENTER_FIELD_DESC = new org.apache.thrift.protocol.TField("datacenter", org.apache.thrift.protocol.TType.STRING, (short)2); +- private static final org.apache.thrift.protocol.TField RACK_FIELD_DESC = new org.apache.thrift.protocol.TField("rack", org.apache.thrift.protocol.TType.STRING, (short)3); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new EndpointDetailsStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new 
EndpointDetailsTupleSchemeFactory()); +- } +- +- public String host; // required +- public String datacenter; // required +- public String rack; // optional +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- HOST((short)1, "host"), +- DATACENTER((short)2, "datacenter"), +- RACK((short)3, "rack"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // HOST +- return HOST; +- case 2: // DATACENTER +- return DATACENTER; +- case 3: // RACK +- return RACK; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private _Fields optionals[] = {_Fields.RACK}; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.HOST, new org.apache.thrift.meta_data.FieldMetaData("host", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.DATACENTER, new org.apache.thrift.meta_data.FieldMetaData("datacenter", org.apache.thrift.TFieldRequirementType.DEFAULT, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.RACK, new org.apache.thrift.meta_data.FieldMetaData("rack", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(EndpointDetails.class, metaDataMap); +- } +- +- public EndpointDetails() { +- } +- +- public EndpointDetails( +- String host, +- String datacenter) +- { +- this(); +- this.host = host; +- this.datacenter = datacenter; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public EndpointDetails(EndpointDetails other) { +- if (other.isSetHost()) { +- this.host = other.host; +- } +- if (other.isSetDatacenter()) { +- this.datacenter = other.datacenter; +- } +- if (other.isSetRack()) { +- this.rack = other.rack; +- } +- } +- +- public EndpointDetails deepCopy() { +- return new EndpointDetails(this); +- } +- +- @Override +- public void clear() { +- this.host = null; +- this.datacenter = null; +- this.rack = null; +- } +- +- public String getHost() { +- return this.host; +- } +- +- public EndpointDetails setHost(String host) { +- this.host = host; +- return this; +- } +- +- public void unsetHost() { +- this.host = null; +- } +- +- /** Returns true if field host is set (has been assigned a value) and false otherwise */ +- public boolean isSetHost() { +- return this.host != null; +- } +- +- public void setHostIsSet(boolean value) { +- if (!value) { +- this.host = null; +- } +- } +- +- public String getDatacenter() { +- return this.datacenter; +- } +- +- public EndpointDetails setDatacenter(String datacenter) { +- this.datacenter = datacenter; +- return this; +- } +- +- public void unsetDatacenter() { +- this.datacenter = null; +- } +- +- /** Returns true if field datacenter is set (has been assigned a value) and false otherwise */ +- public boolean isSetDatacenter() { +- return this.datacenter != null; +- } +- +- public void setDatacenterIsSet(boolean value) { +- if (!value) { +- this.datacenter = null; +- } +- } +- +- public String getRack() { +- return this.rack; +- } +- +- public EndpointDetails setRack(String rack) { +- this.rack = rack; +- return this; +- } +- +- public void unsetRack() { +- this.rack = null; +- } +- +- /** Returns true if field rack is set (has been assigned a value) and false otherwise */ +- public boolean isSetRack() { +- return this.rack != null; +- } +- +- public void setRackIsSet(boolean value) { +- if (!value) { +- this.rack = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) 
{ +- switch (field) { +- case HOST: +- if (value == null) { +- unsetHost(); +- } else { +- setHost((String)value); +- } +- break; +- +- case DATACENTER: +- if (value == null) { +- unsetDatacenter(); +- } else { +- setDatacenter((String)value); +- } +- break; +- +- case RACK: +- if (value == null) { +- unsetRack(); +- } else { +- setRack((String)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case HOST: +- return getHost(); +- +- case DATACENTER: +- return getDatacenter(); +- +- case RACK: +- return getRack(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case HOST: +- return isSetHost(); +- case DATACENTER: +- return isSetDatacenter(); +- case RACK: +- return isSetRack(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof EndpointDetails) +- return this.equals((EndpointDetails)that); +- return false; +- } +- +- public boolean equals(EndpointDetails that) { +- if (that == null) +- return false; +- +- boolean this_present_host = true && this.isSetHost(); +- boolean that_present_host = true && that.isSetHost(); +- if (this_present_host || that_present_host) { +- if (!(this_present_host && that_present_host)) +- return false; +- if (!this.host.equals(that.host)) +- return false; +- } +- +- boolean this_present_datacenter = true && this.isSetDatacenter(); +- boolean that_present_datacenter = true && that.isSetDatacenter(); +- if (this_present_datacenter || that_present_datacenter) { +- if (!(this_present_datacenter && that_present_datacenter)) +- return false; +- if (!this.datacenter.equals(that.datacenter)) +- return false; +- } +- +- boolean 
this_present_rack = true && this.isSetRack(); +- boolean that_present_rack = true && that.isSetRack(); +- if (this_present_rack || that_present_rack) { +- if (!(this_present_rack && that_present_rack)) +- return false; +- if (!this.rack.equals(that.rack)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_host = true && (isSetHost()); +- builder.append(present_host); +- if (present_host) +- builder.append(host); +- +- boolean present_datacenter = true && (isSetDatacenter()); +- builder.append(present_datacenter); +- if (present_datacenter) +- builder.append(datacenter); +- +- boolean present_rack = true && (isSetRack()); +- builder.append(present_rack); +- if (present_rack) +- builder.append(rack); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(EndpointDetails other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetHost()).compareTo(other.isSetHost()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetHost()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.host, other.host); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetDatacenter()).compareTo(other.isSetDatacenter()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetDatacenter()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.datacenter, other.datacenter); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetRack()).compareTo(other.isSetRack()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetRack()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rack, other.rack); +- if 
(lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("EndpointDetails("); +- boolean first = true; +- +- sb.append("host:"); +- if (this.host == null) { +- sb.append("null"); +- } else { +- sb.append(this.host); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("datacenter:"); +- if (this.datacenter == null) { +- sb.append("null"); +- } else { +- sb.append(this.datacenter); +- } +- first = false; +- if (isSetRack()) { +- if (!first) sb.append(", "); +- sb.append("rack:"); +- if (this.rack == null) { +- sb.append("null"); +- } else { +- sb.append(this.rack); +- } +- first = false; +- } +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } 
+- +- private static class EndpointDetailsStandardSchemeFactory implements SchemeFactory { +- public EndpointDetailsStandardScheme getScheme() { +- return new EndpointDetailsStandardScheme(); +- } +- } +- +- private static class EndpointDetailsStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, EndpointDetails struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // HOST +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.host = iprot.readString(); +- struct.setHostIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // DATACENTER +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.datacenter = iprot.readString(); +- struct.setDatacenterIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // RACK +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.rack = iprot.readString(); +- struct.setRackIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, EndpointDetails struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.host != null) { +- oprot.writeFieldBegin(HOST_FIELD_DESC); 
+- oprot.writeString(struct.host); +- oprot.writeFieldEnd(); +- } +- if (struct.datacenter != null) { +- oprot.writeFieldBegin(DATACENTER_FIELD_DESC); +- oprot.writeString(struct.datacenter); +- oprot.writeFieldEnd(); +- } +- if (struct.rack != null) { +- if (struct.isSetRack()) { +- oprot.writeFieldBegin(RACK_FIELD_DESC); +- oprot.writeString(struct.rack); +- oprot.writeFieldEnd(); +- } +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class EndpointDetailsTupleSchemeFactory implements SchemeFactory { +- public EndpointDetailsTupleScheme getScheme() { +- return new EndpointDetailsTupleScheme(); +- } +- } +- +- private static class EndpointDetailsTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, EndpointDetails struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetHost()) { +- optionals.set(0); +- } +- if (struct.isSetDatacenter()) { +- optionals.set(1); +- } +- if (struct.isSetRack()) { +- optionals.set(2); +- } +- oprot.writeBitSet(optionals, 3); +- if (struct.isSetHost()) { +- oprot.writeString(struct.host); +- } +- if (struct.isSetDatacenter()) { +- oprot.writeString(struct.datacenter); +- } +- if (struct.isSetRack()) { +- oprot.writeString(struct.rack); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, EndpointDetails struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(3); +- if (incoming.get(0)) { +- struct.host = iprot.readString(); +- struct.setHostIsSet(true); +- } +- if (incoming.get(1)) { +- struct.datacenter = iprot.readString(); +- struct.setDatacenterIsSet(true); +- } +- if (incoming.get(2)) { +- struct.rack = iprot.readString(); +- struct.setRackIsSet(true); +- } +- } +- } +- +-} +- +diff --git 
a/interface/thrift/gen-java/org/apache/cassandra/thrift/IndexClause.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/IndexClause.java +deleted file mode 100644 +index f3524b5..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/IndexClause.java ++++ /dev/null +@@ -1,681 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. 
+- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * @deprecated use a KeyRange with row_filter in get_range_slices instead +- */ +-public class IndexClause implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("IndexClause"); +- +- private static final org.apache.thrift.protocol.TField EXPRESSIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("expressions", org.apache.thrift.protocol.TType.LIST, (short)1); +- private static final org.apache.thrift.protocol.TField START_KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("start_key", org.apache.thrift.protocol.TType.STRING, (short)2); +- private static final org.apache.thrift.protocol.TField COUNT_FIELD_DESC = new org.apache.thrift.protocol.TField("count", org.apache.thrift.protocol.TType.I32, (short)3); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new 
IndexClauseStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new IndexClauseTupleSchemeFactory()); +- } +- +- public List expressions; // required +- public ByteBuffer start_key; // required +- public int count; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- EXPRESSIONS((short)1, "expressions"), +- START_KEY((short)2, "start_key"), +- COUNT((short)3, "count"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // EXPRESSIONS +- return EXPRESSIONS; +- case 2: // START_KEY +- return START_KEY; +- case 3: // COUNT +- return COUNT; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private static final int __COUNT_ISSET_ID = 0; +- private byte __isset_bitfield = 0; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.EXPRESSIONS, new org.apache.thrift.meta_data.FieldMetaData("expressions", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IndexExpression.class)))); +- tmpMap.put(_Fields.START_KEY, new org.apache.thrift.meta_data.FieldMetaData("start_key", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.COUNT, new org.apache.thrift.meta_data.FieldMetaData("count", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(IndexClause.class, metaDataMap); +- } +- +- public IndexClause() { +- this.count = 100; +- +- } +- +- public IndexClause( +- List expressions, +- ByteBuffer start_key, +- int count) +- { +- this(); +- this.expressions = expressions; +- this.start_key = start_key; +- this.count 
= count; +- setCountIsSet(true); +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public IndexClause(IndexClause other) { +- __isset_bitfield = other.__isset_bitfield; +- if (other.isSetExpressions()) { +- List __this__expressions = new ArrayList(other.expressions.size()); +- for (IndexExpression other_element : other.expressions) { +- __this__expressions.add(new IndexExpression(other_element)); +- } +- this.expressions = __this__expressions; +- } +- if (other.isSetStart_key()) { +- this.start_key = org.apache.thrift.TBaseHelper.copyBinary(other.start_key); +-; +- } +- this.count = other.count; +- } +- +- public IndexClause deepCopy() { +- return new IndexClause(this); +- } +- +- @Override +- public void clear() { +- this.expressions = null; +- this.start_key = null; +- this.count = 100; +- +- } +- +- public int getExpressionsSize() { +- return (this.expressions == null) ? 0 : this.expressions.size(); +- } +- +- public java.util.Iterator getExpressionsIterator() { +- return (this.expressions == null) ? null : this.expressions.iterator(); +- } +- +- public void addToExpressions(IndexExpression elem) { +- if (this.expressions == null) { +- this.expressions = new ArrayList(); +- } +- this.expressions.add(elem); +- } +- +- public List getExpressions() { +- return this.expressions; +- } +- +- public IndexClause setExpressions(List expressions) { +- this.expressions = expressions; +- return this; +- } +- +- public void unsetExpressions() { +- this.expressions = null; +- } +- +- /** Returns true if field expressions is set (has been assigned a value) and false otherwise */ +- public boolean isSetExpressions() { +- return this.expressions != null; +- } +- +- public void setExpressionsIsSet(boolean value) { +- if (!value) { +- this.expressions = null; +- } +- } +- +- public byte[] getStart_key() { +- setStart_key(org.apache.thrift.TBaseHelper.rightSize(start_key)); +- return start_key == null ? 
null : start_key.array(); +- } +- +- public ByteBuffer bufferForStart_key() { +- return start_key; +- } +- +- public IndexClause setStart_key(byte[] start_key) { +- setStart_key(start_key == null ? (ByteBuffer)null : ByteBuffer.wrap(start_key)); +- return this; +- } +- +- public IndexClause setStart_key(ByteBuffer start_key) { +- this.start_key = start_key; +- return this; +- } +- +- public void unsetStart_key() { +- this.start_key = null; +- } +- +- /** Returns true if field start_key is set (has been assigned a value) and false otherwise */ +- public boolean isSetStart_key() { +- return this.start_key != null; +- } +- +- public void setStart_keyIsSet(boolean value) { +- if (!value) { +- this.start_key = null; +- } +- } +- +- public int getCount() { +- return this.count; +- } +- +- public IndexClause setCount(int count) { +- this.count = count; +- setCountIsSet(true); +- return this; +- } +- +- public void unsetCount() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __COUNT_ISSET_ID); +- } +- +- /** Returns true if field count is set (has been assigned a value) and false otherwise */ +- public boolean isSetCount() { +- return EncodingUtils.testBit(__isset_bitfield, __COUNT_ISSET_ID); +- } +- +- public void setCountIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __COUNT_ISSET_ID, value); +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case EXPRESSIONS: +- if (value == null) { +- unsetExpressions(); +- } else { +- setExpressions((List)value); +- } +- break; +- +- case START_KEY: +- if (value == null) { +- unsetStart_key(); +- } else { +- setStart_key((ByteBuffer)value); +- } +- break; +- +- case COUNT: +- if (value == null) { +- unsetCount(); +- } else { +- setCount((Integer)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case EXPRESSIONS: +- return getExpressions(); +- +- case START_KEY: +- return 
getStart_key(); +- +- case COUNT: +- return Integer.valueOf(getCount()); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case EXPRESSIONS: +- return isSetExpressions(); +- case START_KEY: +- return isSetStart_key(); +- case COUNT: +- return isSetCount(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof IndexClause) +- return this.equals((IndexClause)that); +- return false; +- } +- +- public boolean equals(IndexClause that) { +- if (that == null) +- return false; +- +- boolean this_present_expressions = true && this.isSetExpressions(); +- boolean that_present_expressions = true && that.isSetExpressions(); +- if (this_present_expressions || that_present_expressions) { +- if (!(this_present_expressions && that_present_expressions)) +- return false; +- if (!this.expressions.equals(that.expressions)) +- return false; +- } +- +- boolean this_present_start_key = true && this.isSetStart_key(); +- boolean that_present_start_key = true && that.isSetStart_key(); +- if (this_present_start_key || that_present_start_key) { +- if (!(this_present_start_key && that_present_start_key)) +- return false; +- if (!this.start_key.equals(that.start_key)) +- return false; +- } +- +- boolean this_present_count = true; +- boolean that_present_count = true; +- if (this_present_count || that_present_count) { +- if (!(this_present_count && that_present_count)) +- return false; +- if (this.count != that.count) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_expressions = true && (isSetExpressions()); +- 
builder.append(present_expressions); +- if (present_expressions) +- builder.append(expressions); +- +- boolean present_start_key = true && (isSetStart_key()); +- builder.append(present_start_key); +- if (present_start_key) +- builder.append(start_key); +- +- boolean present_count = true; +- builder.append(present_count); +- if (present_count) +- builder.append(count); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(IndexClause other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetExpressions()).compareTo(other.isSetExpressions()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetExpressions()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.expressions, other.expressions); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetStart_key()).compareTo(other.isSetStart_key()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetStart_key()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.start_key, other.start_key); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetCount()).compareTo(other.isSetCount()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetCount()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.count, other.count); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws 
org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("IndexClause("); +- boolean first = true; +- +- sb.append("expressions:"); +- if (this.expressions == null) { +- sb.append("null"); +- } else { +- sb.append(this.expressions); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("start_key:"); +- if (this.start_key == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.start_key, sb); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("count:"); +- sb.append(this.count); +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (expressions == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'expressions' was not present! Struct: " + toString()); +- } +- if (start_key == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'start_key' was not present! Struct: " + toString()); +- } +- // alas, we cannot check 'count' because it's a primitive and you chose the non-beans generator. +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+- __isset_bitfield = 0; +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class IndexClauseStandardSchemeFactory implements SchemeFactory { +- public IndexClauseStandardScheme getScheme() { +- return new IndexClauseStandardScheme(); +- } +- } +- +- private static class IndexClauseStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, IndexClause struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // EXPRESSIONS +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list24 = iprot.readListBegin(); +- struct.expressions = new ArrayList(_list24.size); +- for (int _i25 = 0; _i25 < _list24.size; ++_i25) +- { +- IndexExpression _elem26; +- _elem26 = new IndexExpression(); +- _elem26.read(iprot); +- struct.expressions.add(_elem26); +- } +- iprot.readListEnd(); +- } +- struct.setExpressionsIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // START_KEY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.start_key = iprot.readBinary(); +- struct.setStart_keyIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // COUNT +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.count = iprot.readI32(); +- struct.setCountIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- if (!struct.isSetCount()) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'count' was not found in serialized data! Struct: " + toString()); +- } +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, IndexClause struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.expressions != null) { +- oprot.writeFieldBegin(EXPRESSIONS_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.expressions.size())); +- for (IndexExpression _iter27 : struct.expressions) +- { +- _iter27.write(oprot); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- if (struct.start_key != null) { +- oprot.writeFieldBegin(START_KEY_FIELD_DESC); +- oprot.writeBinary(struct.start_key); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldBegin(COUNT_FIELD_DESC); +- oprot.writeI32(struct.count); +- oprot.writeFieldEnd(); +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class IndexClauseTupleSchemeFactory implements SchemeFactory { +- public IndexClauseTupleScheme getScheme() { +- return new IndexClauseTupleScheme(); +- } +- } +- +- private static class IndexClauseTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, IndexClause struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- { +- oprot.writeI32(struct.expressions.size()); +- for (IndexExpression _iter28 : struct.expressions) +- { +- _iter28.write(oprot); +- } +- } +- oprot.writeBinary(struct.start_key); +- oprot.writeI32(struct.count); +- } +- +- 
@Override +- public void read(org.apache.thrift.protocol.TProtocol prot, IndexClause struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- { +- org.apache.thrift.protocol.TList _list29 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); +- struct.expressions = new ArrayList(_list29.size); +- for (int _i30 = 0; _i30 < _list29.size; ++_i30) +- { +- IndexExpression _elem31; +- _elem31 = new IndexExpression(); +- _elem31.read(iprot); +- struct.expressions.add(_elem31); +- } +- } +- struct.setExpressionsIsSet(true); +- struct.start_key = iprot.readBinary(); +- struct.setStart_keyIsSet(true); +- struct.count = iprot.readI32(); +- struct.setCountIsSet(true); +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/IndexExpression.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/IndexExpression.java +deleted file mode 100644 +index 5062f2f..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/IndexExpression.java ++++ /dev/null +@@ -1,650 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. 
You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. +- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-public class IndexExpression implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("IndexExpression"); +- +- private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("column_name", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField OP_FIELD_DESC = new org.apache.thrift.protocol.TField("op", org.apache.thrift.protocol.TType.I32, 
(short)2); +- private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.STRING, (short)3); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new IndexExpressionStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new IndexExpressionTupleSchemeFactory()); +- } +- +- public ByteBuffer column_name; // required +- /** +- * +- * @see IndexOperator +- */ +- public IndexOperator op; // required +- public ByteBuffer value; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- COLUMN_NAME((short)1, "column_name"), +- /** +- * +- * @see IndexOperator +- */ +- OP((short)2, "op"), +- VALUE((short)3, "value"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // COLUMN_NAME +- return COLUMN_NAME; +- case 2: // OP +- return OP; +- case 3: // VALUE +- return VALUE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.COLUMN_NAME, new org.apache.thrift.meta_data.FieldMetaData("column_name", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.OP, new org.apache.thrift.meta_data.FieldMetaData("op", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IndexOperator.class))); +- tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(IndexExpression.class, metaDataMap); +- } +- +- public IndexExpression() { +- } +- +- public IndexExpression( +- ByteBuffer column_name, +- IndexOperator op, +- ByteBuffer value) +- { +- this(); +- this.column_name = column_name; +- this.op = op; +- this.value = value; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public IndexExpression(IndexExpression other) { +- if (other.isSetColumn_name()) { +- this.column_name = org.apache.thrift.TBaseHelper.copyBinary(other.column_name); +-; +- } +- if (other.isSetOp()) { +- this.op = other.op; +- } +- if (other.isSetValue()) { +- this.value = org.apache.thrift.TBaseHelper.copyBinary(other.value); +-; +- } +- } +- +- public IndexExpression deepCopy() { +- return new IndexExpression(this); +- } +- +- @Override +- public void clear() { +- this.column_name = null; +- this.op = null; +- this.value = null; +- } +- +- public byte[] getColumn_name() { +- setColumn_name(org.apache.thrift.TBaseHelper.rightSize(column_name)); +- return column_name == null ? null : column_name.array(); +- } +- +- public ByteBuffer bufferForColumn_name() { +- return column_name; +- } +- +- public IndexExpression setColumn_name(byte[] column_name) { +- setColumn_name(column_name == null ? (ByteBuffer)null : ByteBuffer.wrap(column_name)); +- return this; +- } +- +- public IndexExpression setColumn_name(ByteBuffer column_name) { +- this.column_name = column_name; +- return this; +- } +- +- public void unsetColumn_name() { +- this.column_name = null; +- } +- +- /** Returns true if field column_name is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumn_name() { +- return this.column_name != null; +- } +- +- public void setColumn_nameIsSet(boolean value) { +- if (!value) { +- this.column_name = null; +- } +- } +- +- /** +- * +- * @see IndexOperator +- */ +- public IndexOperator getOp() { +- return this.op; +- } +- +- /** +- * +- * @see IndexOperator +- */ +- public IndexExpression setOp(IndexOperator op) { +- this.op = op; +- return this; +- } +- +- public void unsetOp() { +- this.op = null; +- } +- +- /** Returns true if field op is set (has been assigned a value) and false otherwise */ +- public boolean isSetOp() { +- return this.op != null; +- } +- +- public void setOpIsSet(boolean value) { +- if (!value) { +- this.op = 
null; +- } +- } +- +- public byte[] getValue() { +- setValue(org.apache.thrift.TBaseHelper.rightSize(value)); +- return value == null ? null : value.array(); +- } +- +- public ByteBuffer bufferForValue() { +- return value; +- } +- +- public IndexExpression setValue(byte[] value) { +- setValue(value == null ? (ByteBuffer)null : ByteBuffer.wrap(value)); +- return this; +- } +- +- public IndexExpression setValue(ByteBuffer value) { +- this.value = value; +- return this; +- } +- +- public void unsetValue() { +- this.value = null; +- } +- +- /** Returns true if field value is set (has been assigned a value) and false otherwise */ +- public boolean isSetValue() { +- return this.value != null; +- } +- +- public void setValueIsSet(boolean value) { +- if (!value) { +- this.value = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case COLUMN_NAME: +- if (value == null) { +- unsetColumn_name(); +- } else { +- setColumn_name((ByteBuffer)value); +- } +- break; +- +- case OP: +- if (value == null) { +- unsetOp(); +- } else { +- setOp((IndexOperator)value); +- } +- break; +- +- case VALUE: +- if (value == null) { +- unsetValue(); +- } else { +- setValue((ByteBuffer)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case COLUMN_NAME: +- return getColumn_name(); +- +- case OP: +- return getOp(); +- +- case VALUE: +- return getValue(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case COLUMN_NAME: +- return isSetColumn_name(); +- case OP: +- return isSetOp(); +- case VALUE: +- return isSetValue(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) 
+- return false; +- if (that instanceof IndexExpression) +- return this.equals((IndexExpression)that); +- return false; +- } +- +- public boolean equals(IndexExpression that) { +- if (that == null) +- return false; +- +- boolean this_present_column_name = true && this.isSetColumn_name(); +- boolean that_present_column_name = true && that.isSetColumn_name(); +- if (this_present_column_name || that_present_column_name) { +- if (!(this_present_column_name && that_present_column_name)) +- return false; +- if (!this.column_name.equals(that.column_name)) +- return false; +- } +- +- boolean this_present_op = true && this.isSetOp(); +- boolean that_present_op = true && that.isSetOp(); +- if (this_present_op || that_present_op) { +- if (!(this_present_op && that_present_op)) +- return false; +- if (!this.op.equals(that.op)) +- return false; +- } +- +- boolean this_present_value = true && this.isSetValue(); +- boolean that_present_value = true && that.isSetValue(); +- if (this_present_value || that_present_value) { +- if (!(this_present_value && that_present_value)) +- return false; +- if (!this.value.equals(that.value)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_column_name = true && (isSetColumn_name()); +- builder.append(present_column_name); +- if (present_column_name) +- builder.append(column_name); +- +- boolean present_op = true && (isSetOp()); +- builder.append(present_op); +- if (present_op) +- builder.append(op.getValue()); +- +- boolean present_value = true && (isSetValue()); +- builder.append(present_value); +- if (present_value) +- builder.append(value); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(IndexExpression other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = 
Boolean.valueOf(isSetColumn_name()).compareTo(other.isSetColumn_name()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumn_name()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_name, other.column_name); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetOp()).compareTo(other.isSetOp()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetOp()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.op, other.op); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetValue()).compareTo(other.isSetValue()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetValue()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.value, other.value); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("IndexExpression("); +- boolean first = true; +- +- sb.append("column_name:"); +- if (this.column_name == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.column_name, sb); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("op:"); +- if (this.op == null) { +- sb.append("null"); +- } else { +- sb.append(this.op); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("value:"); +- if (this.value == null) { +- sb.append("null"); 
+- } else { +- org.apache.thrift.TBaseHelper.toString(this.value, sb); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (column_name == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'column_name' was not present! Struct: " + toString()); +- } +- if (op == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'op' was not present! Struct: " + toString()); +- } +- if (value == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'value' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class IndexExpressionStandardSchemeFactory implements SchemeFactory { +- public IndexExpressionStandardScheme getScheme() { +- return new IndexExpressionStandardScheme(); +- } +- } +- +- private static class IndexExpressionStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, IndexExpression struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == 
org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // COLUMN_NAME +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.column_name = iprot.readBinary(); +- struct.setColumn_nameIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // OP +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.op = IndexOperator.findByValue(iprot.readI32()); +- struct.setOpIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // VALUE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.value = iprot.readBinary(); +- struct.setValueIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, IndexExpression struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.column_name != null) { +- oprot.writeFieldBegin(COLUMN_NAME_FIELD_DESC); +- oprot.writeBinary(struct.column_name); +- oprot.writeFieldEnd(); +- } +- if (struct.op != null) { +- oprot.writeFieldBegin(OP_FIELD_DESC); +- oprot.writeI32(struct.op.getValue()); +- oprot.writeFieldEnd(); +- } +- if (struct.value != null) { +- oprot.writeFieldBegin(VALUE_FIELD_DESC); +- oprot.writeBinary(struct.value); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class IndexExpressionTupleSchemeFactory implements SchemeFactory { +- public 
IndexExpressionTupleScheme getScheme() { +- return new IndexExpressionTupleScheme(); +- } +- } +- +- private static class IndexExpressionTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, IndexExpression struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeBinary(struct.column_name); +- oprot.writeI32(struct.op.getValue()); +- oprot.writeBinary(struct.value); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, IndexExpression struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.column_name = iprot.readBinary(); +- struct.setColumn_nameIsSet(true); +- struct.op = IndexOperator.findByValue(iprot.readI32()); +- struct.setOpIsSet(true); +- struct.value = iprot.readBinary(); +- struct.setValueIsSet(true); +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/IndexOperator.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/IndexOperator.java +deleted file mode 100644 +index 767d773..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/IndexOperator.java ++++ /dev/null +@@ -1,75 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. 
You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. +- * +- */ +- +- +- +-import java.util.Map; +-import java.util.HashMap; +-import org.apache.thrift.TEnum; +- +-public enum IndexOperator implements org.apache.thrift.TEnum { +- EQ(0), +- GTE(1), +- GT(2), +- LTE(3), +- LT(4); +- +- private final int value; +- +- private IndexOperator(int value) { +- this.value = value; +- } +- +- /** +- * Get the integer value of this enum value, as defined in the Thrift IDL. +- */ +- public int getValue() { +- return value; +- } +- +- /** +- * Find a the enum type by its integer value, as defined in the Thrift IDL. +- * @return null if the value is not found. +- */ +- public static IndexOperator findByValue(int value) { +- switch (value) { +- case 0: +- return EQ; +- case 1: +- return GTE; +- case 2: +- return GT; +- case 3: +- return LTE; +- case 4: +- return LT; +- default: +- return null; +- } +- } +-} +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/IndexType.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/IndexType.java +deleted file mode 100644 +index e6a5e9b..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/IndexType.java ++++ /dev/null +@@ -1,69 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. 
See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. +- * +- */ +- +- +- +-import java.util.Map; +-import java.util.HashMap; +-import org.apache.thrift.TEnum; +- +-public enum IndexType implements org.apache.thrift.TEnum { +- KEYS(0), +- CUSTOM(1), +- COMPOSITES(2); +- +- private final int value; +- +- private IndexType(int value) { +- this.value = value; +- } +- +- /** +- * Get the integer value of this enum value, as defined in the Thrift IDL. +- */ +- public int getValue() { +- return value; +- } +- +- /** +- * Find a the enum type by its integer value, as defined in the Thrift IDL. +- * @return null if the value is not found. 
+- */ +- public static IndexType findByValue(int value) { +- switch (value) { +- case 0: +- return KEYS; +- case 1: +- return CUSTOM; +- case 2: +- return COMPOSITES; +- default: +- return null; +- } +- } +-} +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/InvalidRequestException.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/InvalidRequestException.java +deleted file mode 100644 +index 6038a23..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/InvalidRequestException.java ++++ /dev/null +@@ -1,414 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. 
+- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * Invalid request could mean keyspace or column family does not exist, required parameters are missing, or a parameter is malformed. +- * why contains an associated error message. 
+- */ +-public class InvalidRequestException extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("InvalidRequestException"); +- +- private static final org.apache.thrift.protocol.TField WHY_FIELD_DESC = new org.apache.thrift.protocol.TField("why", org.apache.thrift.protocol.TType.STRING, (short)1); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new InvalidRequestExceptionStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new InvalidRequestExceptionTupleSchemeFactory()); +- } +- +- public String why; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- WHY((short)1, "why"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // WHY +- return WHY; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.WHY, new org.apache.thrift.meta_data.FieldMetaData("why", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(InvalidRequestException.class, metaDataMap); +- } +- +- public InvalidRequestException() { +- } +- +- public InvalidRequestException( +- String why) +- { +- this(); +- this.why = why; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public InvalidRequestException(InvalidRequestException other) { +- if (other.isSetWhy()) { +- this.why = other.why; +- } +- } +- +- public InvalidRequestException deepCopy() { +- return new InvalidRequestException(this); +- } +- +- @Override +- public void clear() { +- this.why = null; +- } +- +- public String getWhy() { +- return this.why; +- } +- +- public InvalidRequestException setWhy(String why) { +- this.why = why; +- return this; +- } +- +- public void unsetWhy() { +- this.why = null; +- } +- +- /** Returns true if field why is set (has been assigned a value) and false otherwise */ +- public boolean isSetWhy() { +- return this.why != null; +- } +- +- public void setWhyIsSet(boolean value) { +- if (!value) { +- this.why = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case WHY: +- if (value == null) { +- unsetWhy(); +- } else { +- setWhy((String)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case WHY: +- return getWhy(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case WHY: +- return isSetWhy(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof InvalidRequestException) +- return this.equals((InvalidRequestException)that); +- return false; +- } +- +- public boolean equals(InvalidRequestException that) { +- if (that == null) +- return false; +- +- boolean this_present_why = true && this.isSetWhy(); +- boolean that_present_why = true && that.isSetWhy(); +- if (this_present_why || that_present_why) { +- if (!(this_present_why && that_present_why)) +- return false; +- if 
(!this.why.equals(that.why)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_why = true && (isSetWhy()); +- builder.append(present_why); +- if (present_why) +- builder.append(why); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(InvalidRequestException other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetWhy()).compareTo(other.isSetWhy()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetWhy()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.why, other.why); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("InvalidRequestException("); +- boolean first = true; +- +- sb.append("why:"); +- if (this.why == null) { +- sb.append("null"); +- } else { +- sb.append(this.why); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (why == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'why' was not present! 
Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class InvalidRequestExceptionStandardSchemeFactory implements SchemeFactory { +- public InvalidRequestExceptionStandardScheme getScheme() { +- return new InvalidRequestExceptionStandardScheme(); +- } +- } +- +- private static class InvalidRequestExceptionStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, InvalidRequestException struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // WHY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.why = iprot.readString(); +- struct.setWhyIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void 
write(org.apache.thrift.protocol.TProtocol oprot, InvalidRequestException struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.why != null) { +- oprot.writeFieldBegin(WHY_FIELD_DESC); +- oprot.writeString(struct.why); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class InvalidRequestExceptionTupleSchemeFactory implements SchemeFactory { +- public InvalidRequestExceptionTupleScheme getScheme() { +- return new InvalidRequestExceptionTupleScheme(); +- } +- } +- +- private static class InvalidRequestExceptionTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, InvalidRequestException struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeString(struct.why); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, InvalidRequestException struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.why = iprot.readString(); +- struct.setWhyIsSet(true); +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/KeyCount.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/KeyCount.java +deleted file mode 100644 +index cbb5e51..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/KeyCount.java ++++ /dev/null +@@ -1,521 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. 
The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. +- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-public class KeyCount implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("KeyCount"); +- +- private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static 
final org.apache.thrift.protocol.TField COUNT_FIELD_DESC = new org.apache.thrift.protocol.TField("count", org.apache.thrift.protocol.TType.I32, (short)2); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new KeyCountStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new KeyCountTupleSchemeFactory()); +- } +- +- public ByteBuffer key; // required +- public int count; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- KEY((short)1, "key"), +- COUNT((short)2, "count"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // KEY +- return KEY; +- case 2: // COUNT +- return COUNT; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private static final int __COUNT_ISSET_ID = 0; +- private byte __isset_bitfield = 0; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.KEY, new org.apache.thrift.meta_data.FieldMetaData("key", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.COUNT, new org.apache.thrift.meta_data.FieldMetaData("count", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(KeyCount.class, metaDataMap); +- } +- +- public KeyCount() { +- } +- +- public KeyCount( +- ByteBuffer key, +- int count) +- { +- this(); +- this.key = key; +- this.count = count; +- setCountIsSet(true); +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public KeyCount(KeyCount other) { +- __isset_bitfield = other.__isset_bitfield; +- if (other.isSetKey()) { +- this.key = org.apache.thrift.TBaseHelper.copyBinary(other.key); +-; +- } +- this.count = other.count; +- } +- +- public KeyCount deepCopy() { +- return new KeyCount(this); +- } +- +- @Override +- public void clear() { +- this.key = null; +- setCountIsSet(false); +- this.count = 0; +- } +- +- public byte[] getKey() { +- setKey(org.apache.thrift.TBaseHelper.rightSize(key)); +- return key == null ? null : key.array(); +- } +- +- public ByteBuffer bufferForKey() { +- return key; +- } +- +- public KeyCount setKey(byte[] key) { +- setKey(key == null ? (ByteBuffer)null : ByteBuffer.wrap(key)); +- return this; +- } +- +- public KeyCount setKey(ByteBuffer key) { +- this.key = key; +- return this; +- } +- +- public void unsetKey() { +- this.key = null; +- } +- +- /** Returns true if field key is set (has been assigned a value) and false otherwise */ +- public boolean isSetKey() { +- return this.key != null; +- } +- +- public void setKeyIsSet(boolean value) { +- if (!value) { +- this.key = null; +- } +- } +- +- public int getCount() { +- return this.count; +- } +- +- public KeyCount setCount(int count) { +- this.count = count; +- setCountIsSet(true); +- return this; +- } +- +- public void unsetCount() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __COUNT_ISSET_ID); +- } +- +- /** Returns true if field count is set (has been assigned a value) and false otherwise */ +- public boolean isSetCount() { +- return EncodingUtils.testBit(__isset_bitfield, __COUNT_ISSET_ID); +- } +- +- public void setCountIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __COUNT_ISSET_ID, value); +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case KEY: +- if (value == null) { +- unsetKey(); +- } else { +- setKey((ByteBuffer)value); +- } +- break; +- +- case COUNT: +- if (value == null) { +- 
unsetCount(); +- } else { +- setCount((Integer)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case KEY: +- return getKey(); +- +- case COUNT: +- return Integer.valueOf(getCount()); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case KEY: +- return isSetKey(); +- case COUNT: +- return isSetCount(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof KeyCount) +- return this.equals((KeyCount)that); +- return false; +- } +- +- public boolean equals(KeyCount that) { +- if (that == null) +- return false; +- +- boolean this_present_key = true && this.isSetKey(); +- boolean that_present_key = true && that.isSetKey(); +- if (this_present_key || that_present_key) { +- if (!(this_present_key && that_present_key)) +- return false; +- if (!this.key.equals(that.key)) +- return false; +- } +- +- boolean this_present_count = true; +- boolean that_present_count = true; +- if (this_present_count || that_present_count) { +- if (!(this_present_count && that_present_count)) +- return false; +- if (this.count != that.count) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_key = true && (isSetKey()); +- builder.append(present_key); +- if (present_key) +- builder.append(key); +- +- boolean present_count = true; +- builder.append(present_count); +- if (present_count) +- builder.append(count); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(KeyCount other) { +- if (!getClass().equals(other.getClass())) { +- return 
getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetKey()).compareTo(other.isSetKey()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKey()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key, other.key); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetCount()).compareTo(other.isSetCount()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetCount()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.count, other.count); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("KeyCount("); +- boolean first = true; +- +- sb.append("key:"); +- if (this.key == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.key, sb); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("count:"); +- sb.append(this.count); +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (key == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'key' was not present! Struct: " + toString()); +- } +- // alas, we cannot check 'count' because it's a primitive and you chose the non-beans generator. 
+- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. +- __isset_bitfield = 0; +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class KeyCountStandardSchemeFactory implements SchemeFactory { +- public KeyCountStandardScheme getScheme() { +- return new KeyCountStandardScheme(); +- } +- } +- +- private static class KeyCountStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, KeyCount struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // KEY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.key = iprot.readBinary(); +- struct.setKeyIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // COUNT +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.count = iprot.readI32(); +- struct.setCountIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- if (!struct.isSetCount()) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'count' was not found in serialized data! Struct: " + toString()); +- } +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, KeyCount struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.key != null) { +- oprot.writeFieldBegin(KEY_FIELD_DESC); +- oprot.writeBinary(struct.key); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldBegin(COUNT_FIELD_DESC); +- oprot.writeI32(struct.count); +- oprot.writeFieldEnd(); +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class KeyCountTupleSchemeFactory implements SchemeFactory { +- public KeyCountTupleScheme getScheme() { +- return new KeyCountTupleScheme(); +- } +- } +- +- private static class KeyCountTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, KeyCount struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeBinary(struct.key); +- oprot.writeI32(struct.count); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, KeyCount struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.key = iprot.readBinary(); +- struct.setKeyIsSet(true); +- struct.count = iprot.readI32(); +- struct.setCountIsSet(true); +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/KeyRange.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/KeyRange.java +deleted file mode 100644 +index 0168410..0000000 +--- 
a/interface/thrift/gen-java/org/apache/cassandra/thrift/KeyRange.java ++++ /dev/null +@@ -1,1034 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. 
+- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * The semantics of start keys and tokens are slightly different. +- * Keys are start-inclusive; tokens are start-exclusive. Token +- * ranges may also wrap -- that is, the end token may be less +- * than the start one. Thus, a range from keyX to keyX is a +- * one-element range, but a range from tokenY to tokenY is the +- * full ring. 
+- */ +-public class KeyRange implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("KeyRange"); +- +- private static final org.apache.thrift.protocol.TField START_KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("start_key", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField END_KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("end_key", org.apache.thrift.protocol.TType.STRING, (short)2); +- private static final org.apache.thrift.protocol.TField START_TOKEN_FIELD_DESC = new org.apache.thrift.protocol.TField("start_token", org.apache.thrift.protocol.TType.STRING, (short)3); +- private static final org.apache.thrift.protocol.TField END_TOKEN_FIELD_DESC = new org.apache.thrift.protocol.TField("end_token", org.apache.thrift.protocol.TType.STRING, (short)4); +- private static final org.apache.thrift.protocol.TField ROW_FILTER_FIELD_DESC = new org.apache.thrift.protocol.TField("row_filter", org.apache.thrift.protocol.TType.LIST, (short)6); +- private static final org.apache.thrift.protocol.TField COUNT_FIELD_DESC = new org.apache.thrift.protocol.TField("count", org.apache.thrift.protocol.TType.I32, (short)5); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new KeyRangeStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new KeyRangeTupleSchemeFactory()); +- } +- +- public ByteBuffer start_key; // optional +- public ByteBuffer end_key; // optional +- public String start_token; // optional +- public String end_token; // optional +- public List row_filter; // optional +- public int count; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- START_KEY((short)1, "start_key"), +- END_KEY((short)2, "end_key"), +- START_TOKEN((short)3, "start_token"), +- END_TOKEN((short)4, "end_token"), +- ROW_FILTER((short)6, "row_filter"), +- COUNT((short)5, "count"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // START_KEY +- return START_KEY; +- case 2: // END_KEY +- return END_KEY; +- case 3: // START_TOKEN +- return START_TOKEN; +- case 4: // END_TOKEN +- return END_TOKEN; +- case 6: // ROW_FILTER +- return ROW_FILTER; +- case 5: // COUNT +- return COUNT; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private static final int __COUNT_ISSET_ID = 0; +- private byte __isset_bitfield = 0; +- private _Fields optionals[] = {_Fields.START_KEY,_Fields.END_KEY,_Fields.START_TOKEN,_Fields.END_TOKEN,_Fields.ROW_FILTER}; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.START_KEY, new org.apache.thrift.meta_data.FieldMetaData("start_key", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.END_KEY, new org.apache.thrift.meta_data.FieldMetaData("end_key", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.START_TOKEN, new org.apache.thrift.meta_data.FieldMetaData("start_token", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.END_TOKEN, new org.apache.thrift.meta_data.FieldMetaData("end_token", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.ROW_FILTER, new org.apache.thrift.meta_data.FieldMetaData("row_filter", 
org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IndexExpression.class)))); +- tmpMap.put(_Fields.COUNT, new org.apache.thrift.meta_data.FieldMetaData("count", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(KeyRange.class, metaDataMap); +- } +- +- public KeyRange() { +- this.count = 100; +- +- } +- +- public KeyRange( +- int count) +- { +- this(); +- this.count = count; +- setCountIsSet(true); +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public KeyRange(KeyRange other) { +- __isset_bitfield = other.__isset_bitfield; +- if (other.isSetStart_key()) { +- this.start_key = org.apache.thrift.TBaseHelper.copyBinary(other.start_key); +-; +- } +- if (other.isSetEnd_key()) { +- this.end_key = org.apache.thrift.TBaseHelper.copyBinary(other.end_key); +-; +- } +- if (other.isSetStart_token()) { +- this.start_token = other.start_token; +- } +- if (other.isSetEnd_token()) { +- this.end_token = other.end_token; +- } +- if (other.isSetRow_filter()) { +- List __this__row_filter = new ArrayList(other.row_filter.size()); +- for (IndexExpression other_element : other.row_filter) { +- __this__row_filter.add(new IndexExpression(other_element)); +- } +- this.row_filter = __this__row_filter; +- } +- this.count = other.count; +- } +- +- public KeyRange deepCopy() { +- return new KeyRange(this); +- } +- +- @Override +- public void clear() { +- this.start_key = null; +- this.end_key = null; +- this.start_token = null; +- this.end_token = null; +- this.row_filter = null; +- this.count = 100; +- +- } +- +- public byte[] getStart_key() { +- 
setStart_key(org.apache.thrift.TBaseHelper.rightSize(start_key)); +- return start_key == null ? null : start_key.array(); +- } +- +- public ByteBuffer bufferForStart_key() { +- return start_key; +- } +- +- public KeyRange setStart_key(byte[] start_key) { +- setStart_key(start_key == null ? (ByteBuffer)null : ByteBuffer.wrap(start_key)); +- return this; +- } +- +- public KeyRange setStart_key(ByteBuffer start_key) { +- this.start_key = start_key; +- return this; +- } +- +- public void unsetStart_key() { +- this.start_key = null; +- } +- +- /** Returns true if field start_key is set (has been assigned a value) and false otherwise */ +- public boolean isSetStart_key() { +- return this.start_key != null; +- } +- +- public void setStart_keyIsSet(boolean value) { +- if (!value) { +- this.start_key = null; +- } +- } +- +- public byte[] getEnd_key() { +- setEnd_key(org.apache.thrift.TBaseHelper.rightSize(end_key)); +- return end_key == null ? null : end_key.array(); +- } +- +- public ByteBuffer bufferForEnd_key() { +- return end_key; +- } +- +- public KeyRange setEnd_key(byte[] end_key) { +- setEnd_key(end_key == null ? 
(ByteBuffer)null : ByteBuffer.wrap(end_key)); +- return this; +- } +- +- public KeyRange setEnd_key(ByteBuffer end_key) { +- this.end_key = end_key; +- return this; +- } +- +- public void unsetEnd_key() { +- this.end_key = null; +- } +- +- /** Returns true if field end_key is set (has been assigned a value) and false otherwise */ +- public boolean isSetEnd_key() { +- return this.end_key != null; +- } +- +- public void setEnd_keyIsSet(boolean value) { +- if (!value) { +- this.end_key = null; +- } +- } +- +- public String getStart_token() { +- return this.start_token; +- } +- +- public KeyRange setStart_token(String start_token) { +- this.start_token = start_token; +- return this; +- } +- +- public void unsetStart_token() { +- this.start_token = null; +- } +- +- /** Returns true if field start_token is set (has been assigned a value) and false otherwise */ +- public boolean isSetStart_token() { +- return this.start_token != null; +- } +- +- public void setStart_tokenIsSet(boolean value) { +- if (!value) { +- this.start_token = null; +- } +- } +- +- public String getEnd_token() { +- return this.end_token; +- } +- +- public KeyRange setEnd_token(String end_token) { +- this.end_token = end_token; +- return this; +- } +- +- public void unsetEnd_token() { +- this.end_token = null; +- } +- +- /** Returns true if field end_token is set (has been assigned a value) and false otherwise */ +- public boolean isSetEnd_token() { +- return this.end_token != null; +- } +- +- public void setEnd_tokenIsSet(boolean value) { +- if (!value) { +- this.end_token = null; +- } +- } +- +- public int getRow_filterSize() { +- return (this.row_filter == null) ? 0 : this.row_filter.size(); +- } +- +- public java.util.Iterator getRow_filterIterator() { +- return (this.row_filter == null) ? 
null : this.row_filter.iterator(); +- } +- +- public void addToRow_filter(IndexExpression elem) { +- if (this.row_filter == null) { +- this.row_filter = new ArrayList(); +- } +- this.row_filter.add(elem); +- } +- +- public List getRow_filter() { +- return this.row_filter; +- } +- +- public KeyRange setRow_filter(List row_filter) { +- this.row_filter = row_filter; +- return this; +- } +- +- public void unsetRow_filter() { +- this.row_filter = null; +- } +- +- /** Returns true if field row_filter is set (has been assigned a value) and false otherwise */ +- public boolean isSetRow_filter() { +- return this.row_filter != null; +- } +- +- public void setRow_filterIsSet(boolean value) { +- if (!value) { +- this.row_filter = null; +- } +- } +- +- public int getCount() { +- return this.count; +- } +- +- public KeyRange setCount(int count) { +- this.count = count; +- setCountIsSet(true); +- return this; +- } +- +- public void unsetCount() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __COUNT_ISSET_ID); +- } +- +- /** Returns true if field count is set (has been assigned a value) and false otherwise */ +- public boolean isSetCount() { +- return EncodingUtils.testBit(__isset_bitfield, __COUNT_ISSET_ID); +- } +- +- public void setCountIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __COUNT_ISSET_ID, value); +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case START_KEY: +- if (value == null) { +- unsetStart_key(); +- } else { +- setStart_key((ByteBuffer)value); +- } +- break; +- +- case END_KEY: +- if (value == null) { +- unsetEnd_key(); +- } else { +- setEnd_key((ByteBuffer)value); +- } +- break; +- +- case START_TOKEN: +- if (value == null) { +- unsetStart_token(); +- } else { +- setStart_token((String)value); +- } +- break; +- +- case END_TOKEN: +- if (value == null) { +- unsetEnd_token(); +- } else { +- setEnd_token((String)value); +- } +- break; +- +- case ROW_FILTER: +- if 
(value == null) { +- unsetRow_filter(); +- } else { +- setRow_filter((List)value); +- } +- break; +- +- case COUNT: +- if (value == null) { +- unsetCount(); +- } else { +- setCount((Integer)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case START_KEY: +- return getStart_key(); +- +- case END_KEY: +- return getEnd_key(); +- +- case START_TOKEN: +- return getStart_token(); +- +- case END_TOKEN: +- return getEnd_token(); +- +- case ROW_FILTER: +- return getRow_filter(); +- +- case COUNT: +- return Integer.valueOf(getCount()); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case START_KEY: +- return isSetStart_key(); +- case END_KEY: +- return isSetEnd_key(); +- case START_TOKEN: +- return isSetStart_token(); +- case END_TOKEN: +- return isSetEnd_token(); +- case ROW_FILTER: +- return isSetRow_filter(); +- case COUNT: +- return isSetCount(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof KeyRange) +- return this.equals((KeyRange)that); +- return false; +- } +- +- public boolean equals(KeyRange that) { +- if (that == null) +- return false; +- +- boolean this_present_start_key = true && this.isSetStart_key(); +- boolean that_present_start_key = true && that.isSetStart_key(); +- if (this_present_start_key || that_present_start_key) { +- if (!(this_present_start_key && that_present_start_key)) +- return false; +- if (!this.start_key.equals(that.start_key)) +- return false; +- } +- +- boolean this_present_end_key = true && this.isSetEnd_key(); +- boolean that_present_end_key = true && that.isSetEnd_key(); +- if (this_present_end_key || 
that_present_end_key) { +- if (!(this_present_end_key && that_present_end_key)) +- return false; +- if (!this.end_key.equals(that.end_key)) +- return false; +- } +- +- boolean this_present_start_token = true && this.isSetStart_token(); +- boolean that_present_start_token = true && that.isSetStart_token(); +- if (this_present_start_token || that_present_start_token) { +- if (!(this_present_start_token && that_present_start_token)) +- return false; +- if (!this.start_token.equals(that.start_token)) +- return false; +- } +- +- boolean this_present_end_token = true && this.isSetEnd_token(); +- boolean that_present_end_token = true && that.isSetEnd_token(); +- if (this_present_end_token || that_present_end_token) { +- if (!(this_present_end_token && that_present_end_token)) +- return false; +- if (!this.end_token.equals(that.end_token)) +- return false; +- } +- +- boolean this_present_row_filter = true && this.isSetRow_filter(); +- boolean that_present_row_filter = true && that.isSetRow_filter(); +- if (this_present_row_filter || that_present_row_filter) { +- if (!(this_present_row_filter && that_present_row_filter)) +- return false; +- if (!this.row_filter.equals(that.row_filter)) +- return false; +- } +- +- boolean this_present_count = true; +- boolean that_present_count = true; +- if (this_present_count || that_present_count) { +- if (!(this_present_count && that_present_count)) +- return false; +- if (this.count != that.count) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_start_key = true && (isSetStart_key()); +- builder.append(present_start_key); +- if (present_start_key) +- builder.append(start_key); +- +- boolean present_end_key = true && (isSetEnd_key()); +- builder.append(present_end_key); +- if (present_end_key) +- builder.append(end_key); +- +- boolean present_start_token = true && (isSetStart_token()); +- builder.append(present_start_token); 
+- if (present_start_token) +- builder.append(start_token); +- +- boolean present_end_token = true && (isSetEnd_token()); +- builder.append(present_end_token); +- if (present_end_token) +- builder.append(end_token); +- +- boolean present_row_filter = true && (isSetRow_filter()); +- builder.append(present_row_filter); +- if (present_row_filter) +- builder.append(row_filter); +- +- boolean present_count = true; +- builder.append(present_count); +- if (present_count) +- builder.append(count); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(KeyRange other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetStart_key()).compareTo(other.isSetStart_key()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetStart_key()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.start_key, other.start_key); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetEnd_key()).compareTo(other.isSetEnd_key()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetEnd_key()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.end_key, other.end_key); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetStart_token()).compareTo(other.isSetStart_token()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetStart_token()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.start_token, other.start_token); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetEnd_token()).compareTo(other.isSetEnd_token()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetEnd_token()) { +- lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.end_token, other.end_token); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetRow_filter()).compareTo(other.isSetRow_filter()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetRow_filter()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.row_filter, other.row_filter); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetCount()).compareTo(other.isSetCount()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetCount()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.count, other.count); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("KeyRange("); +- boolean first = true; +- +- if (isSetStart_key()) { +- sb.append("start_key:"); +- if (this.start_key == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.start_key, sb); +- } +- first = false; +- } +- if (isSetEnd_key()) { +- if (!first) sb.append(", "); +- sb.append("end_key:"); +- if (this.end_key == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.end_key, sb); +- } +- first = false; +- } +- if (isSetStart_token()) { +- if (!first) sb.append(", "); +- sb.append("start_token:"); +- if (this.start_token == null) { +- sb.append("null"); +- } else 
{ +- sb.append(this.start_token); +- } +- first = false; +- } +- if (isSetEnd_token()) { +- if (!first) sb.append(", "); +- sb.append("end_token:"); +- if (this.end_token == null) { +- sb.append("null"); +- } else { +- sb.append(this.end_token); +- } +- first = false; +- } +- if (isSetRow_filter()) { +- if (!first) sb.append(", "); +- sb.append("row_filter:"); +- if (this.row_filter == null) { +- sb.append("null"); +- } else { +- sb.append(this.row_filter); +- } +- first = false; +- } +- if (!first) sb.append(", "); +- sb.append("count:"); +- sb.append(this.count); +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // alas, we cannot check 'count' because it's a primitive and you chose the non-beans generator. +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+- __isset_bitfield = 0; +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class KeyRangeStandardSchemeFactory implements SchemeFactory { +- public KeyRangeStandardScheme getScheme() { +- return new KeyRangeStandardScheme(); +- } +- } +- +- private static class KeyRangeStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, KeyRange struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // START_KEY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.start_key = iprot.readBinary(); +- struct.setStart_keyIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // END_KEY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.end_key = iprot.readBinary(); +- struct.setEnd_keyIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // START_TOKEN +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.start_token = iprot.readString(); +- struct.setStart_tokenIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // END_TOKEN +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.end_token = iprot.readString(); +- struct.setEnd_tokenIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 6: // ROW_FILTER +- if 
(schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list32 = iprot.readListBegin(); +- struct.row_filter = new ArrayList(_list32.size); +- for (int _i33 = 0; _i33 < _list32.size; ++_i33) +- { +- IndexExpression _elem34; +- _elem34 = new IndexExpression(); +- _elem34.read(iprot); +- struct.row_filter.add(_elem34); +- } +- iprot.readListEnd(); +- } +- struct.setRow_filterIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 5: // COUNT +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.count = iprot.readI32(); +- struct.setCountIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- if (!struct.isSetCount()) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'count' was not found in serialized data! 
Struct: " + toString()); +- } +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, KeyRange struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.start_key != null) { +- if (struct.isSetStart_key()) { +- oprot.writeFieldBegin(START_KEY_FIELD_DESC); +- oprot.writeBinary(struct.start_key); +- oprot.writeFieldEnd(); +- } +- } +- if (struct.end_key != null) { +- if (struct.isSetEnd_key()) { +- oprot.writeFieldBegin(END_KEY_FIELD_DESC); +- oprot.writeBinary(struct.end_key); +- oprot.writeFieldEnd(); +- } +- } +- if (struct.start_token != null) { +- if (struct.isSetStart_token()) { +- oprot.writeFieldBegin(START_TOKEN_FIELD_DESC); +- oprot.writeString(struct.start_token); +- oprot.writeFieldEnd(); +- } +- } +- if (struct.end_token != null) { +- if (struct.isSetEnd_token()) { +- oprot.writeFieldBegin(END_TOKEN_FIELD_DESC); +- oprot.writeString(struct.end_token); +- oprot.writeFieldEnd(); +- } +- } +- oprot.writeFieldBegin(COUNT_FIELD_DESC); +- oprot.writeI32(struct.count); +- oprot.writeFieldEnd(); +- if (struct.row_filter != null) { +- if (struct.isSetRow_filter()) { +- oprot.writeFieldBegin(ROW_FILTER_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.row_filter.size())); +- for (IndexExpression _iter35 : struct.row_filter) +- { +- _iter35.write(oprot); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class KeyRangeTupleSchemeFactory implements SchemeFactory { +- public KeyRangeTupleScheme getScheme() { +- return new KeyRangeTupleScheme(); +- } +- } +- +- private static class KeyRangeTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, KeyRange struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = 
(TTupleProtocol) prot; +- oprot.writeI32(struct.count); +- BitSet optionals = new BitSet(); +- if (struct.isSetStart_key()) { +- optionals.set(0); +- } +- if (struct.isSetEnd_key()) { +- optionals.set(1); +- } +- if (struct.isSetStart_token()) { +- optionals.set(2); +- } +- if (struct.isSetEnd_token()) { +- optionals.set(3); +- } +- if (struct.isSetRow_filter()) { +- optionals.set(4); +- } +- oprot.writeBitSet(optionals, 5); +- if (struct.isSetStart_key()) { +- oprot.writeBinary(struct.start_key); +- } +- if (struct.isSetEnd_key()) { +- oprot.writeBinary(struct.end_key); +- } +- if (struct.isSetStart_token()) { +- oprot.writeString(struct.start_token); +- } +- if (struct.isSetEnd_token()) { +- oprot.writeString(struct.end_token); +- } +- if (struct.isSetRow_filter()) { +- { +- oprot.writeI32(struct.row_filter.size()); +- for (IndexExpression _iter36 : struct.row_filter) +- { +- _iter36.write(oprot); +- } +- } +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, KeyRange struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.count = iprot.readI32(); +- struct.setCountIsSet(true); +- BitSet incoming = iprot.readBitSet(5); +- if (incoming.get(0)) { +- struct.start_key = iprot.readBinary(); +- struct.setStart_keyIsSet(true); +- } +- if (incoming.get(1)) { +- struct.end_key = iprot.readBinary(); +- struct.setEnd_keyIsSet(true); +- } +- if (incoming.get(2)) { +- struct.start_token = iprot.readString(); +- struct.setStart_tokenIsSet(true); +- } +- if (incoming.get(3)) { +- struct.end_token = iprot.readString(); +- struct.setEnd_tokenIsSet(true); +- } +- if (incoming.get(4)) { +- { +- org.apache.thrift.protocol.TList _list37 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); +- struct.row_filter = new ArrayList(_list37.size); +- for (int _i38 = 0; _i38 < _list37.size; ++_i38) +- { +- IndexExpression _elem39; +- _elem39 = new 
IndexExpression(); +- _elem39.read(iprot); +- struct.row_filter.add(_elem39); +- } +- } +- struct.setRow_filterIsSet(true); +- } +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/KeySlice.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/KeySlice.java +deleted file mode 100644 +index df4beb1..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/KeySlice.java ++++ /dev/null +@@ -1,583 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. 
+- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * A KeySlice is key followed by the data it maps to. A collection of KeySlice is returned by the get_range_slice operation. +- * +- * @param key. a row key +- * @param columns. List of data represented by the key. Typically, the list is pared down to only the columns specified by +- * a SlicePredicate. 
+- */ +-public class KeySlice implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("KeySlice"); +- +- private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)2); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new KeySliceStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new KeySliceTupleSchemeFactory()); +- } +- +- public ByteBuffer key; // required +- public List columns; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- KEY((short)1, "key"), +- COLUMNS((short)2, "columns"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // KEY +- return KEY; +- case 2: // COLUMNS +- return COLUMNS; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. 
+- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.KEY, new org.apache.thrift.meta_data.FieldMetaData("key", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnOrSuperColumn.class)))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(KeySlice.class, metaDataMap); +- } +- +- public KeySlice() { +- } +- +- public KeySlice( +- ByteBuffer key, +- List columns) +- { +- this(); +- this.key = key; +- this.columns = columns; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public KeySlice(KeySlice other) { +- if (other.isSetKey()) { +- this.key = org.apache.thrift.TBaseHelper.copyBinary(other.key); +-; +- } +- if (other.isSetColumns()) { +- List __this__columns = new ArrayList(other.columns.size()); +- for (ColumnOrSuperColumn other_element : other.columns) { +- __this__columns.add(new ColumnOrSuperColumn(other_element)); +- } +- this.columns = __this__columns; +- } +- } +- +- public KeySlice deepCopy() { +- return new KeySlice(this); +- } +- +- @Override +- public void clear() { +- this.key = null; +- this.columns = null; +- } +- +- public byte[] getKey() { +- setKey(org.apache.thrift.TBaseHelper.rightSize(key)); +- return key == null ? null : key.array(); +- } +- +- public ByteBuffer bufferForKey() { +- return key; +- } +- +- public KeySlice setKey(byte[] key) { +- setKey(key == null ? (ByteBuffer)null : ByteBuffer.wrap(key)); +- return this; +- } +- +- public KeySlice setKey(ByteBuffer key) { +- this.key = key; +- return this; +- } +- +- public void unsetKey() { +- this.key = null; +- } +- +- /** Returns true if field key is set (has been assigned a value) and false otherwise */ +- public boolean isSetKey() { +- return this.key != null; +- } +- +- public void setKeyIsSet(boolean value) { +- if (!value) { +- this.key = null; +- } +- } +- +- public int getColumnsSize() { +- return (this.columns == null) ? 0 : this.columns.size(); +- } +- +- public java.util.Iterator getColumnsIterator() { +- return (this.columns == null) ? 
null : this.columns.iterator(); +- } +- +- public void addToColumns(ColumnOrSuperColumn elem) { +- if (this.columns == null) { +- this.columns = new ArrayList(); +- } +- this.columns.add(elem); +- } +- +- public List getColumns() { +- return this.columns; +- } +- +- public KeySlice setColumns(List columns) { +- this.columns = columns; +- return this; +- } +- +- public void unsetColumns() { +- this.columns = null; +- } +- +- /** Returns true if field columns is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumns() { +- return this.columns != null; +- } +- +- public void setColumnsIsSet(boolean value) { +- if (!value) { +- this.columns = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case KEY: +- if (value == null) { +- unsetKey(); +- } else { +- setKey((ByteBuffer)value); +- } +- break; +- +- case COLUMNS: +- if (value == null) { +- unsetColumns(); +- } else { +- setColumns((List)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case KEY: +- return getKey(); +- +- case COLUMNS: +- return getColumns(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case KEY: +- return isSetKey(); +- case COLUMNS: +- return isSetColumns(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof KeySlice) +- return this.equals((KeySlice)that); +- return false; +- } +- +- public boolean equals(KeySlice that) { +- if (that == null) +- return false; +- +- boolean this_present_key = true && this.isSetKey(); +- boolean that_present_key = true && that.isSetKey(); +- if (this_present_key 
|| that_present_key) { +- if (!(this_present_key && that_present_key)) +- return false; +- if (!this.key.equals(that.key)) +- return false; +- } +- +- boolean this_present_columns = true && this.isSetColumns(); +- boolean that_present_columns = true && that.isSetColumns(); +- if (this_present_columns || that_present_columns) { +- if (!(this_present_columns && that_present_columns)) +- return false; +- if (!this.columns.equals(that.columns)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_key = true && (isSetKey()); +- builder.append(present_key); +- if (present_key) +- builder.append(key); +- +- boolean present_columns = true && (isSetColumns()); +- builder.append(present_columns); +- if (present_columns) +- builder.append(columns); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(KeySlice other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetKey()).compareTo(other.isSetKey()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKey()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key, other.key); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetColumns()).compareTo(other.isSetColumns()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumns()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.columns, other.columns); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- 
schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("KeySlice("); +- boolean first = true; +- +- sb.append("key:"); +- if (this.key == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.key, sb); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("columns:"); +- if (this.columns == null) { +- sb.append("null"); +- } else { +- sb.append(this.columns); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (key == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'key' was not present! Struct: " + toString()); +- } +- if (columns == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'columns' was not present! 
Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class KeySliceStandardSchemeFactory implements SchemeFactory { +- public KeySliceStandardScheme getScheme() { +- return new KeySliceStandardScheme(); +- } +- } +- +- private static class KeySliceStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, KeySlice struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // KEY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.key = iprot.readBinary(); +- struct.setKeyIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // COLUMNS +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list40 = iprot.readListBegin(); +- struct.columns = new ArrayList(_list40.size); +- for (int _i41 = 0; _i41 < _list40.size; ++_i41) +- { +- ColumnOrSuperColumn _elem42; +- _elem42 = new ColumnOrSuperColumn(); +- _elem42.read(iprot); +- struct.columns.add(_elem42); +- 
} +- iprot.readListEnd(); +- } +- struct.setColumnsIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, KeySlice struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.key != null) { +- oprot.writeFieldBegin(KEY_FIELD_DESC); +- oprot.writeBinary(struct.key); +- oprot.writeFieldEnd(); +- } +- if (struct.columns != null) { +- oprot.writeFieldBegin(COLUMNS_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.columns.size())); +- for (ColumnOrSuperColumn _iter43 : struct.columns) +- { +- _iter43.write(oprot); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class KeySliceTupleSchemeFactory implements SchemeFactory { +- public KeySliceTupleScheme getScheme() { +- return new KeySliceTupleScheme(); +- } +- } +- +- private static class KeySliceTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, KeySlice struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeBinary(struct.key); +- { +- oprot.writeI32(struct.columns.size()); +- for (ColumnOrSuperColumn _iter44 : struct.columns) +- { +- _iter44.write(oprot); +- } +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, KeySlice struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.key = 
iprot.readBinary(); +- struct.setKeyIsSet(true); +- { +- org.apache.thrift.protocol.TList _list45 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); +- struct.columns = new ArrayList(_list45.size); +- for (int _i46 = 0; _i46 < _list45.size; ++_i46) +- { +- ColumnOrSuperColumn _elem47; +- _elem47 = new ColumnOrSuperColumn(); +- _elem47.read(iprot); +- struct.columns.add(_elem47); +- } +- } +- struct.setColumnsIsSet(true); +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/KsDef.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/KsDef.java +deleted file mode 100644 +index cd2a938..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/KsDef.java ++++ /dev/null +@@ -1,1047 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. 
+- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-public class KsDef implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("KsDef"); +- +- private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField STRATEGY_CLASS_FIELD_DESC = new org.apache.thrift.protocol.TField("strategy_class", org.apache.thrift.protocol.TType.STRING, (short)2); +- private static final org.apache.thrift.protocol.TField STRATEGY_OPTIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("strategy_options", org.apache.thrift.protocol.TType.MAP, (short)3); +- private static final org.apache.thrift.protocol.TField REPLICATION_FACTOR_FIELD_DESC = new org.apache.thrift.protocol.TField("replication_factor", org.apache.thrift.protocol.TType.I32, (short)4); +- private static final 
org.apache.thrift.protocol.TField CF_DEFS_FIELD_DESC = new org.apache.thrift.protocol.TField("cf_defs", org.apache.thrift.protocol.TType.LIST, (short)5); +- private static final org.apache.thrift.protocol.TField DURABLE_WRITES_FIELD_DESC = new org.apache.thrift.protocol.TField("durable_writes", org.apache.thrift.protocol.TType.BOOL, (short)6); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new KsDefStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new KsDefTupleSchemeFactory()); +- } +- +- public String name; // required +- public String strategy_class; // required +- public Map strategy_options; // optional +- /** +- * @deprecated ignored +- */ +- public int replication_factor; // optional +- public List cf_defs; // required +- public boolean durable_writes; // optional +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- NAME((short)1, "name"), +- STRATEGY_CLASS((short)2, "strategy_class"), +- STRATEGY_OPTIONS((short)3, "strategy_options"), +- /** +- * @deprecated ignored +- */ +- REPLICATION_FACTOR((short)4, "replication_factor"), +- CF_DEFS((short)5, "cf_defs"), +- DURABLE_WRITES((short)6, "durable_writes"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. 
+- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // NAME +- return NAME; +- case 2: // STRATEGY_CLASS +- return STRATEGY_CLASS; +- case 3: // STRATEGY_OPTIONS +- return STRATEGY_OPTIONS; +- case 4: // REPLICATION_FACTOR +- return REPLICATION_FACTOR; +- case 5: // CF_DEFS +- return CF_DEFS; +- case 6: // DURABLE_WRITES +- return DURABLE_WRITES; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private static final int __REPLICATION_FACTOR_ISSET_ID = 0; +- private static final int __DURABLE_WRITES_ISSET_ID = 1; +- private byte __isset_bitfield = 0; +- private _Fields optionals[] = {_Fields.STRATEGY_OPTIONS,_Fields.REPLICATION_FACTOR,_Fields.DURABLE_WRITES}; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.STRATEGY_CLASS, new org.apache.thrift.meta_data.FieldMetaData("strategy_class", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.STRATEGY_OPTIONS, new org.apache.thrift.meta_data.FieldMetaData("strategy_options", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); +- tmpMap.put(_Fields.REPLICATION_FACTOR, new org.apache.thrift.meta_data.FieldMetaData("replication_factor", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- tmpMap.put(_Fields.CF_DEFS, new org.apache.thrift.meta_data.FieldMetaData("cf_defs", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CfDef.class)))); +- tmpMap.put(_Fields.DURABLE_WRITES, new org.apache.thrift.meta_data.FieldMetaData("durable_writes", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(KsDef.class, metaDataMap); +- } +- +- public KsDef() { +- this.durable_writes = true; +- +- } +- +- public KsDef( +- String name, +- String strategy_class, +- List cf_defs) +- { +- this(); +- this.name = name; +- this.strategy_class = strategy_class; +- this.cf_defs 
= cf_defs; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public KsDef(KsDef other) { +- __isset_bitfield = other.__isset_bitfield; +- if (other.isSetName()) { +- this.name = other.name; +- } +- if (other.isSetStrategy_class()) { +- this.strategy_class = other.strategy_class; +- } +- if (other.isSetStrategy_options()) { +- Map __this__strategy_options = new HashMap(other.strategy_options); +- this.strategy_options = __this__strategy_options; +- } +- this.replication_factor = other.replication_factor; +- if (other.isSetCf_defs()) { +- List __this__cf_defs = new ArrayList(other.cf_defs.size()); +- for (CfDef other_element : other.cf_defs) { +- __this__cf_defs.add(new CfDef(other_element)); +- } +- this.cf_defs = __this__cf_defs; +- } +- this.durable_writes = other.durable_writes; +- } +- +- public KsDef deepCopy() { +- return new KsDef(this); +- } +- +- @Override +- public void clear() { +- this.name = null; +- this.strategy_class = null; +- this.strategy_options = null; +- setReplication_factorIsSet(false); +- this.replication_factor = 0; +- this.cf_defs = null; +- this.durable_writes = true; +- +- } +- +- public String getName() { +- return this.name; +- } +- +- public KsDef setName(String name) { +- this.name = name; +- return this; +- } +- +- public void unsetName() { +- this.name = null; +- } +- +- /** Returns true if field name is set (has been assigned a value) and false otherwise */ +- public boolean isSetName() { +- return this.name != null; +- } +- +- public void setNameIsSet(boolean value) { +- if (!value) { +- this.name = null; +- } +- } +- +- public String getStrategy_class() { +- return this.strategy_class; +- } +- +- public KsDef setStrategy_class(String strategy_class) { +- this.strategy_class = strategy_class; +- return this; +- } +- +- public void unsetStrategy_class() { +- this.strategy_class = null; +- } +- +- /** Returns true if field strategy_class is set (has been assigned a value) and false otherwise */ +- public boolean 
isSetStrategy_class() { +- return this.strategy_class != null; +- } +- +- public void setStrategy_classIsSet(boolean value) { +- if (!value) { +- this.strategy_class = null; +- } +- } +- +- public int getStrategy_optionsSize() { +- return (this.strategy_options == null) ? 0 : this.strategy_options.size(); +- } +- +- public void putToStrategy_options(String key, String val) { +- if (this.strategy_options == null) { +- this.strategy_options = new HashMap(); +- } +- this.strategy_options.put(key, val); +- } +- +- public Map getStrategy_options() { +- return this.strategy_options; +- } +- +- public KsDef setStrategy_options(Map strategy_options) { +- this.strategy_options = strategy_options; +- return this; +- } +- +- public void unsetStrategy_options() { +- this.strategy_options = null; +- } +- +- /** Returns true if field strategy_options is set (has been assigned a value) and false otherwise */ +- public boolean isSetStrategy_options() { +- return this.strategy_options != null; +- } +- +- public void setStrategy_optionsIsSet(boolean value) { +- if (!value) { +- this.strategy_options = null; +- } +- } +- +- /** +- * @deprecated ignored +- */ +- public int getReplication_factor() { +- return this.replication_factor; +- } +- +- /** +- * @deprecated ignored +- */ +- public KsDef setReplication_factor(int replication_factor) { +- this.replication_factor = replication_factor; +- setReplication_factorIsSet(true); +- return this; +- } +- +- public void unsetReplication_factor() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __REPLICATION_FACTOR_ISSET_ID); +- } +- +- /** Returns true if field replication_factor is set (has been assigned a value) and false otherwise */ +- public boolean isSetReplication_factor() { +- return EncodingUtils.testBit(__isset_bitfield, __REPLICATION_FACTOR_ISSET_ID); +- } +- +- public void setReplication_factorIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __REPLICATION_FACTOR_ISSET_ID, value); 
+- } +- +- public int getCf_defsSize() { +- return (this.cf_defs == null) ? 0 : this.cf_defs.size(); +- } +- +- public java.util.Iterator getCf_defsIterator() { +- return (this.cf_defs == null) ? null : this.cf_defs.iterator(); +- } +- +- public void addToCf_defs(CfDef elem) { +- if (this.cf_defs == null) { +- this.cf_defs = new ArrayList(); +- } +- this.cf_defs.add(elem); +- } +- +- public List getCf_defs() { +- return this.cf_defs; +- } +- +- public KsDef setCf_defs(List cf_defs) { +- this.cf_defs = cf_defs; +- return this; +- } +- +- public void unsetCf_defs() { +- this.cf_defs = null; +- } +- +- /** Returns true if field cf_defs is set (has been assigned a value) and false otherwise */ +- public boolean isSetCf_defs() { +- return this.cf_defs != null; +- } +- +- public void setCf_defsIsSet(boolean value) { +- if (!value) { +- this.cf_defs = null; +- } +- } +- +- public boolean isDurable_writes() { +- return this.durable_writes; +- } +- +- public KsDef setDurable_writes(boolean durable_writes) { +- this.durable_writes = durable_writes; +- setDurable_writesIsSet(true); +- return this; +- } +- +- public void unsetDurable_writes() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __DURABLE_WRITES_ISSET_ID); +- } +- +- /** Returns true if field durable_writes is set (has been assigned a value) and false otherwise */ +- public boolean isSetDurable_writes() { +- return EncodingUtils.testBit(__isset_bitfield, __DURABLE_WRITES_ISSET_ID); +- } +- +- public void setDurable_writesIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __DURABLE_WRITES_ISSET_ID, value); +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case NAME: +- if (value == null) { +- unsetName(); +- } else { +- setName((String)value); +- } +- break; +- +- case STRATEGY_CLASS: +- if (value == null) { +- unsetStrategy_class(); +- } else { +- setStrategy_class((String)value); +- } +- break; +- +- case STRATEGY_OPTIONS: +- 
if (value == null) { +- unsetStrategy_options(); +- } else { +- setStrategy_options((Map)value); +- } +- break; +- +- case REPLICATION_FACTOR: +- if (value == null) { +- unsetReplication_factor(); +- } else { +- setReplication_factor((Integer)value); +- } +- break; +- +- case CF_DEFS: +- if (value == null) { +- unsetCf_defs(); +- } else { +- setCf_defs((List)value); +- } +- break; +- +- case DURABLE_WRITES: +- if (value == null) { +- unsetDurable_writes(); +- } else { +- setDurable_writes((Boolean)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case NAME: +- return getName(); +- +- case STRATEGY_CLASS: +- return getStrategy_class(); +- +- case STRATEGY_OPTIONS: +- return getStrategy_options(); +- +- case REPLICATION_FACTOR: +- return Integer.valueOf(getReplication_factor()); +- +- case CF_DEFS: +- return getCf_defs(); +- +- case DURABLE_WRITES: +- return Boolean.valueOf(isDurable_writes()); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case NAME: +- return isSetName(); +- case STRATEGY_CLASS: +- return isSetStrategy_class(); +- case STRATEGY_OPTIONS: +- return isSetStrategy_options(); +- case REPLICATION_FACTOR: +- return isSetReplication_factor(); +- case CF_DEFS: +- return isSetCf_defs(); +- case DURABLE_WRITES: +- return isSetDurable_writes(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof KsDef) +- return this.equals((KsDef)that); +- return false; +- } +- +- public boolean equals(KsDef that) { +- if (that == null) +- return false; +- +- boolean this_present_name = true && this.isSetName(); +- boolean that_present_name = true && 
that.isSetName(); +- if (this_present_name || that_present_name) { +- if (!(this_present_name && that_present_name)) +- return false; +- if (!this.name.equals(that.name)) +- return false; +- } +- +- boolean this_present_strategy_class = true && this.isSetStrategy_class(); +- boolean that_present_strategy_class = true && that.isSetStrategy_class(); +- if (this_present_strategy_class || that_present_strategy_class) { +- if (!(this_present_strategy_class && that_present_strategy_class)) +- return false; +- if (!this.strategy_class.equals(that.strategy_class)) +- return false; +- } +- +- boolean this_present_strategy_options = true && this.isSetStrategy_options(); +- boolean that_present_strategy_options = true && that.isSetStrategy_options(); +- if (this_present_strategy_options || that_present_strategy_options) { +- if (!(this_present_strategy_options && that_present_strategy_options)) +- return false; +- if (!this.strategy_options.equals(that.strategy_options)) +- return false; +- } +- +- boolean this_present_replication_factor = true && this.isSetReplication_factor(); +- boolean that_present_replication_factor = true && that.isSetReplication_factor(); +- if (this_present_replication_factor || that_present_replication_factor) { +- if (!(this_present_replication_factor && that_present_replication_factor)) +- return false; +- if (this.replication_factor != that.replication_factor) +- return false; +- } +- +- boolean this_present_cf_defs = true && this.isSetCf_defs(); +- boolean that_present_cf_defs = true && that.isSetCf_defs(); +- if (this_present_cf_defs || that_present_cf_defs) { +- if (!(this_present_cf_defs && that_present_cf_defs)) +- return false; +- if (!this.cf_defs.equals(that.cf_defs)) +- return false; +- } +- +- boolean this_present_durable_writes = true && this.isSetDurable_writes(); +- boolean that_present_durable_writes = true && that.isSetDurable_writes(); +- if (this_present_durable_writes || that_present_durable_writes) { +- if 
(!(this_present_durable_writes && that_present_durable_writes)) +- return false; +- if (this.durable_writes != that.durable_writes) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_name = true && (isSetName()); +- builder.append(present_name); +- if (present_name) +- builder.append(name); +- +- boolean present_strategy_class = true && (isSetStrategy_class()); +- builder.append(present_strategy_class); +- if (present_strategy_class) +- builder.append(strategy_class); +- +- boolean present_strategy_options = true && (isSetStrategy_options()); +- builder.append(present_strategy_options); +- if (present_strategy_options) +- builder.append(strategy_options); +- +- boolean present_replication_factor = true && (isSetReplication_factor()); +- builder.append(present_replication_factor); +- if (present_replication_factor) +- builder.append(replication_factor); +- +- boolean present_cf_defs = true && (isSetCf_defs()); +- builder.append(present_cf_defs); +- if (present_cf_defs) +- builder.append(cf_defs); +- +- boolean present_durable_writes = true && (isSetDurable_writes()); +- builder.append(present_durable_writes); +- if (present_durable_writes) +- builder.append(durable_writes); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(KsDef other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetName()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetStrategy_class()).compareTo(other.isSetStrategy_class()); +- if (lastComparison != 0) { +- 
return lastComparison; +- } +- if (isSetStrategy_class()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.strategy_class, other.strategy_class); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetStrategy_options()).compareTo(other.isSetStrategy_options()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetStrategy_options()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.strategy_options, other.strategy_options); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetReplication_factor()).compareTo(other.isSetReplication_factor()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetReplication_factor()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.replication_factor, other.replication_factor); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetCf_defs()).compareTo(other.isSetCf_defs()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetCf_defs()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cf_defs, other.cf_defs); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetDurable_writes()).compareTo(other.isSetDurable_writes()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetDurable_writes()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.durable_writes, other.durable_writes); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void 
write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("KsDef("); +- boolean first = true; +- +- sb.append("name:"); +- if (this.name == null) { +- sb.append("null"); +- } else { +- sb.append(this.name); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("strategy_class:"); +- if (this.strategy_class == null) { +- sb.append("null"); +- } else { +- sb.append(this.strategy_class); +- } +- first = false; +- if (isSetStrategy_options()) { +- if (!first) sb.append(", "); +- sb.append("strategy_options:"); +- if (this.strategy_options == null) { +- sb.append("null"); +- } else { +- sb.append(this.strategy_options); +- } +- first = false; +- } +- if (isSetReplication_factor()) { +- if (!first) sb.append(", "); +- sb.append("replication_factor:"); +- sb.append(this.replication_factor); +- first = false; +- } +- if (!first) sb.append(", "); +- sb.append("cf_defs:"); +- if (this.cf_defs == null) { +- sb.append("null"); +- } else { +- sb.append(this.cf_defs); +- } +- first = false; +- if (isSetDurable_writes()) { +- if (!first) sb.append(", "); +- sb.append("durable_writes:"); +- sb.append(this.durable_writes); +- first = false; +- } +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (name == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'name' was not present! Struct: " + toString()); +- } +- if (strategy_class == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'strategy_class' was not present! Struct: " + toString()); +- } +- if (cf_defs == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'cf_defs' was not present! 
Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. +- __isset_bitfield = 0; +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class KsDefStandardSchemeFactory implements SchemeFactory { +- public KsDefStandardScheme getScheme() { +- return new KsDefStandardScheme(); +- } +- } +- +- private static class KsDefStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, KsDef struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // NAME +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.name = iprot.readString(); +- struct.setNameIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // STRATEGY_CLASS +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.strategy_class = iprot.readString(); +- struct.setStrategy_classIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type); +- } +- break; +- case 3: // STRATEGY_OPTIONS +- if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { +- { +- org.apache.thrift.protocol.TMap _map146 = iprot.readMapBegin(); +- struct.strategy_options = new HashMap(2*_map146.size); +- for (int _i147 = 0; _i147 < _map146.size; ++_i147) +- { +- String _key148; +- String _val149; +- _key148 = iprot.readString(); +- _val149 = iprot.readString(); +- struct.strategy_options.put(_key148, _val149); +- } +- iprot.readMapEnd(); +- } +- struct.setStrategy_optionsIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // REPLICATION_FACTOR +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.replication_factor = iprot.readI32(); +- struct.setReplication_factorIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 5: // CF_DEFS +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list150 = iprot.readListBegin(); +- struct.cf_defs = new ArrayList(_list150.size); +- for (int _i151 = 0; _i151 < _list150.size; ++_i151) +- { +- CfDef _elem152; +- _elem152 = new CfDef(); +- _elem152.read(iprot); +- struct.cf_defs.add(_elem152); +- } +- iprot.readListEnd(); +- } +- struct.setCf_defsIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 6: // DURABLE_WRITES +- if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { +- struct.durable_writes = iprot.readBool(); +- struct.setDurable_writesIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be 
checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, KsDef struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.name != null) { +- oprot.writeFieldBegin(NAME_FIELD_DESC); +- oprot.writeString(struct.name); +- oprot.writeFieldEnd(); +- } +- if (struct.strategy_class != null) { +- oprot.writeFieldBegin(STRATEGY_CLASS_FIELD_DESC); +- oprot.writeString(struct.strategy_class); +- oprot.writeFieldEnd(); +- } +- if (struct.strategy_options != null) { +- if (struct.isSetStrategy_options()) { +- oprot.writeFieldBegin(STRATEGY_OPTIONS_FIELD_DESC); +- { +- oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.strategy_options.size())); +- for (Map.Entry _iter153 : struct.strategy_options.entrySet()) +- { +- oprot.writeString(_iter153.getKey()); +- oprot.writeString(_iter153.getValue()); +- } +- oprot.writeMapEnd(); +- } +- oprot.writeFieldEnd(); +- } +- } +- if (struct.isSetReplication_factor()) { +- oprot.writeFieldBegin(REPLICATION_FACTOR_FIELD_DESC); +- oprot.writeI32(struct.replication_factor); +- oprot.writeFieldEnd(); +- } +- if (struct.cf_defs != null) { +- oprot.writeFieldBegin(CF_DEFS_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.cf_defs.size())); +- for (CfDef _iter154 : struct.cf_defs) +- { +- _iter154.write(oprot); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- if (struct.isSetDurable_writes()) { +- oprot.writeFieldBegin(DURABLE_WRITES_FIELD_DESC); +- oprot.writeBool(struct.durable_writes); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class KsDefTupleSchemeFactory implements SchemeFactory { +- public KsDefTupleScheme getScheme() { +- return new 
KsDefTupleScheme(); +- } +- } +- +- private static class KsDefTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, KsDef struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeString(struct.name); +- oprot.writeString(struct.strategy_class); +- { +- oprot.writeI32(struct.cf_defs.size()); +- for (CfDef _iter155 : struct.cf_defs) +- { +- _iter155.write(oprot); +- } +- } +- BitSet optionals = new BitSet(); +- if (struct.isSetStrategy_options()) { +- optionals.set(0); +- } +- if (struct.isSetReplication_factor()) { +- optionals.set(1); +- } +- if (struct.isSetDurable_writes()) { +- optionals.set(2); +- } +- oprot.writeBitSet(optionals, 3); +- if (struct.isSetStrategy_options()) { +- { +- oprot.writeI32(struct.strategy_options.size()); +- for (Map.Entry _iter156 : struct.strategy_options.entrySet()) +- { +- oprot.writeString(_iter156.getKey()); +- oprot.writeString(_iter156.getValue()); +- } +- } +- } +- if (struct.isSetReplication_factor()) { +- oprot.writeI32(struct.replication_factor); +- } +- if (struct.isSetDurable_writes()) { +- oprot.writeBool(struct.durable_writes); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, KsDef struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.name = iprot.readString(); +- struct.setNameIsSet(true); +- struct.strategy_class = iprot.readString(); +- struct.setStrategy_classIsSet(true); +- { +- org.apache.thrift.protocol.TList _list157 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); +- struct.cf_defs = new ArrayList(_list157.size); +- for (int _i158 = 0; _i158 < _list157.size; ++_i158) +- { +- CfDef _elem159; +- _elem159 = new CfDef(); +- _elem159.read(iprot); +- struct.cf_defs.add(_elem159); +- } +- } +- struct.setCf_defsIsSet(true); +- BitSet incoming = iprot.readBitSet(3); +- if 
(incoming.get(0)) { +- { +- org.apache.thrift.protocol.TMap _map160 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); +- struct.strategy_options = new HashMap(2*_map160.size); +- for (int _i161 = 0; _i161 < _map160.size; ++_i161) +- { +- String _key162; +- String _val163; +- _key162 = iprot.readString(); +- _val163 = iprot.readString(); +- struct.strategy_options.put(_key162, _val163); +- } +- } +- struct.setStrategy_optionsIsSet(true); +- } +- if (incoming.get(1)) { +- struct.replication_factor = iprot.readI32(); +- struct.setReplication_factorIsSet(true); +- } +- if (incoming.get(2)) { +- struct.durable_writes = iprot.readBool(); +- struct.setDurable_writesIsSet(true); +- } +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/MultiSliceRequest.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/MultiSliceRequest.java +deleted file mode 100644 +index 9d4878c..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/MultiSliceRequest.java ++++ /dev/null +@@ -1,1042 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. 
You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. +- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * Used to perform multiple slices on a single row key in one rpc operation +- * @param key. The row key to be multi sliced +- * @param column_parent. The column family (super columns are unsupported) +- * @param column_slices. 0 to many ColumnSlice objects each will be used to select columns +- * @param reversed. Direction of slice +- * @param count. Maximum number of columns +- * @param consistency_level. 
Level to perform the operation at +- */ +-public class MultiSliceRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("MultiSliceRequest"); +- +- private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField COLUMN_PARENT_FIELD_DESC = new org.apache.thrift.protocol.TField("column_parent", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- private static final org.apache.thrift.protocol.TField COLUMN_SLICES_FIELD_DESC = new org.apache.thrift.protocol.TField("column_slices", org.apache.thrift.protocol.TType.LIST, (short)3); +- private static final org.apache.thrift.protocol.TField REVERSED_FIELD_DESC = new org.apache.thrift.protocol.TField("reversed", org.apache.thrift.protocol.TType.BOOL, (short)4); +- private static final org.apache.thrift.protocol.TField COUNT_FIELD_DESC = new org.apache.thrift.protocol.TField("count", org.apache.thrift.protocol.TType.I32, (short)5); +- private static final org.apache.thrift.protocol.TField CONSISTENCY_LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("consistency_level", org.apache.thrift.protocol.TType.I32, (short)6); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new MultiSliceRequestStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new MultiSliceRequestTupleSchemeFactory()); +- } +- +- public ByteBuffer key; // optional +- public ColumnParent column_parent; // optional +- public List column_slices; // optional +- public boolean reversed; // optional +- public int count; // optional +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel consistency_level; // optional +- +- /** The set of fields 
this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- KEY((short)1, "key"), +- COLUMN_PARENT((short)2, "column_parent"), +- COLUMN_SLICES((short)3, "column_slices"), +- REVERSED((short)4, "reversed"), +- COUNT((short)5, "count"), +- /** +- * +- * @see ConsistencyLevel +- */ +- CONSISTENCY_LEVEL((short)6, "consistency_level"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // KEY +- return KEY; +- case 2: // COLUMN_PARENT +- return COLUMN_PARENT; +- case 3: // COLUMN_SLICES +- return COLUMN_SLICES; +- case 4: // REVERSED +- return REVERSED; +- case 5: // COUNT +- return COUNT; +- case 6: // CONSISTENCY_LEVEL +- return CONSISTENCY_LEVEL; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private static final int __REVERSED_ISSET_ID = 0; +- private static final int __COUNT_ISSET_ID = 1; +- private byte __isset_bitfield = 0; +- private _Fields optionals[] = {_Fields.KEY,_Fields.COLUMN_PARENT,_Fields.COLUMN_SLICES,_Fields.REVERSED,_Fields.COUNT,_Fields.CONSISTENCY_LEVEL}; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.KEY, new org.apache.thrift.meta_data.FieldMetaData("key", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.COLUMN_PARENT, new org.apache.thrift.meta_data.FieldMetaData("column_parent", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnParent.class))); +- tmpMap.put(_Fields.COLUMN_SLICES, new org.apache.thrift.meta_data.FieldMetaData("column_slices", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnSlice.class)))); +- tmpMap.put(_Fields.REVERSED, new org.apache.thrift.meta_data.FieldMetaData("reversed", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); +- tmpMap.put(_Fields.COUNT, new org.apache.thrift.meta_data.FieldMetaData("count", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- tmpMap.put(_Fields.CONSISTENCY_LEVEL, new org.apache.thrift.meta_data.FieldMetaData("consistency_level", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ConsistencyLevel.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(MultiSliceRequest.class, metaDataMap); +- } +- +- public MultiSliceRequest() { +- this.reversed = false; +- +- this.count = 1000; +- +- this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public MultiSliceRequest(MultiSliceRequest other) { +- __isset_bitfield = other.__isset_bitfield; +- if (other.isSetKey()) { +- this.key = org.apache.thrift.TBaseHelper.copyBinary(other.key); +-; +- } +- if (other.isSetColumn_parent()) { +- this.column_parent = new ColumnParent(other.column_parent); +- } +- if (other.isSetColumn_slices()) { +- List __this__column_slices = new ArrayList(other.column_slices.size()); +- for (ColumnSlice other_element : other.column_slices) { +- __this__column_slices.add(new ColumnSlice(other_element)); +- } +- this.column_slices = __this__column_slices; +- } +- this.reversed = other.reversed; +- this.count = other.count; +- if (other.isSetConsistency_level()) { +- this.consistency_level = other.consistency_level; +- } +- } +- +- public MultiSliceRequest deepCopy() { +- return new MultiSliceRequest(this); +- } +- +- @Override +- public void clear() { +- this.key = null; +- this.column_parent = null; +- this.column_slices = null; +- this.reversed = false; +- +- this.count 
= 1000; +- +- this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE; +- +- } +- +- public byte[] getKey() { +- setKey(org.apache.thrift.TBaseHelper.rightSize(key)); +- return key == null ? null : key.array(); +- } +- +- public ByteBuffer bufferForKey() { +- return key; +- } +- +- public MultiSliceRequest setKey(byte[] key) { +- setKey(key == null ? (ByteBuffer)null : ByteBuffer.wrap(key)); +- return this; +- } +- +- public MultiSliceRequest setKey(ByteBuffer key) { +- this.key = key; +- return this; +- } +- +- public void unsetKey() { +- this.key = null; +- } +- +- /** Returns true if field key is set (has been assigned a value) and false otherwise */ +- public boolean isSetKey() { +- return this.key != null; +- } +- +- public void setKeyIsSet(boolean value) { +- if (!value) { +- this.key = null; +- } +- } +- +- public ColumnParent getColumn_parent() { +- return this.column_parent; +- } +- +- public MultiSliceRequest setColumn_parent(ColumnParent column_parent) { +- this.column_parent = column_parent; +- return this; +- } +- +- public void unsetColumn_parent() { +- this.column_parent = null; +- } +- +- /** Returns true if field column_parent is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumn_parent() { +- return this.column_parent != null; +- } +- +- public void setColumn_parentIsSet(boolean value) { +- if (!value) { +- this.column_parent = null; +- } +- } +- +- public int getColumn_slicesSize() { +- return (this.column_slices == null) ? 0 : this.column_slices.size(); +- } +- +- public java.util.Iterator getColumn_slicesIterator() { +- return (this.column_slices == null) ? 
null : this.column_slices.iterator(); +- } +- +- public void addToColumn_slices(ColumnSlice elem) { +- if (this.column_slices == null) { +- this.column_slices = new ArrayList(); +- } +- this.column_slices.add(elem); +- } +- +- public List getColumn_slices() { +- return this.column_slices; +- } +- +- public MultiSliceRequest setColumn_slices(List column_slices) { +- this.column_slices = column_slices; +- return this; +- } +- +- public void unsetColumn_slices() { +- this.column_slices = null; +- } +- +- /** Returns true if field column_slices is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumn_slices() { +- return this.column_slices != null; +- } +- +- public void setColumn_slicesIsSet(boolean value) { +- if (!value) { +- this.column_slices = null; +- } +- } +- +- public boolean isReversed() { +- return this.reversed; +- } +- +- public MultiSliceRequest setReversed(boolean reversed) { +- this.reversed = reversed; +- setReversedIsSet(true); +- return this; +- } +- +- public void unsetReversed() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __REVERSED_ISSET_ID); +- } +- +- /** Returns true if field reversed is set (has been assigned a value) and false otherwise */ +- public boolean isSetReversed() { +- return EncodingUtils.testBit(__isset_bitfield, __REVERSED_ISSET_ID); +- } +- +- public void setReversedIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __REVERSED_ISSET_ID, value); +- } +- +- public int getCount() { +- return this.count; +- } +- +- public MultiSliceRequest setCount(int count) { +- this.count = count; +- setCountIsSet(true); +- return this; +- } +- +- public void unsetCount() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __COUNT_ISSET_ID); +- } +- +- /** Returns true if field count is set (has been assigned a value) and false otherwise */ +- public boolean isSetCount() { +- return EncodingUtils.testBit(__isset_bitfield, __COUNT_ISSET_ID); +- } +- 
+- public void setCountIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __COUNT_ISSET_ID, value); +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public ConsistencyLevel getConsistency_level() { +- return this.consistency_level; +- } +- +- /** +- * +- * @see ConsistencyLevel +- */ +- public MultiSliceRequest setConsistency_level(ConsistencyLevel consistency_level) { +- this.consistency_level = consistency_level; +- return this; +- } +- +- public void unsetConsistency_level() { +- this.consistency_level = null; +- } +- +- /** Returns true if field consistency_level is set (has been assigned a value) and false otherwise */ +- public boolean isSetConsistency_level() { +- return this.consistency_level != null; +- } +- +- public void setConsistency_levelIsSet(boolean value) { +- if (!value) { +- this.consistency_level = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case KEY: +- if (value == null) { +- unsetKey(); +- } else { +- setKey((ByteBuffer)value); +- } +- break; +- +- case COLUMN_PARENT: +- if (value == null) { +- unsetColumn_parent(); +- } else { +- setColumn_parent((ColumnParent)value); +- } +- break; +- +- case COLUMN_SLICES: +- if (value == null) { +- unsetColumn_slices(); +- } else { +- setColumn_slices((List)value); +- } +- break; +- +- case REVERSED: +- if (value == null) { +- unsetReversed(); +- } else { +- setReversed((Boolean)value); +- } +- break; +- +- case COUNT: +- if (value == null) { +- unsetCount(); +- } else { +- setCount((Integer)value); +- } +- break; +- +- case CONSISTENCY_LEVEL: +- if (value == null) { +- unsetConsistency_level(); +- } else { +- setConsistency_level((ConsistencyLevel)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case KEY: +- return getKey(); +- +- case COLUMN_PARENT: +- return getColumn_parent(); +- +- case COLUMN_SLICES: +- return getColumn_slices(); +- +- case 
REVERSED: +- return Boolean.valueOf(isReversed()); +- +- case COUNT: +- return Integer.valueOf(getCount()); +- +- case CONSISTENCY_LEVEL: +- return getConsistency_level(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case KEY: +- return isSetKey(); +- case COLUMN_PARENT: +- return isSetColumn_parent(); +- case COLUMN_SLICES: +- return isSetColumn_slices(); +- case REVERSED: +- return isSetReversed(); +- case COUNT: +- return isSetCount(); +- case CONSISTENCY_LEVEL: +- return isSetConsistency_level(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof MultiSliceRequest) +- return this.equals((MultiSliceRequest)that); +- return false; +- } +- +- public boolean equals(MultiSliceRequest that) { +- if (that == null) +- return false; +- +- boolean this_present_key = true && this.isSetKey(); +- boolean that_present_key = true && that.isSetKey(); +- if (this_present_key || that_present_key) { +- if (!(this_present_key && that_present_key)) +- return false; +- if (!this.key.equals(that.key)) +- return false; +- } +- +- boolean this_present_column_parent = true && this.isSetColumn_parent(); +- boolean that_present_column_parent = true && that.isSetColumn_parent(); +- if (this_present_column_parent || that_present_column_parent) { +- if (!(this_present_column_parent && that_present_column_parent)) +- return false; +- if (!this.column_parent.equals(that.column_parent)) +- return false; +- } +- +- boolean this_present_column_slices = true && this.isSetColumn_slices(); +- boolean that_present_column_slices = true && that.isSetColumn_slices(); +- if (this_present_column_slices || that_present_column_slices) { +- if 
(!(this_present_column_slices && that_present_column_slices)) +- return false; +- if (!this.column_slices.equals(that.column_slices)) +- return false; +- } +- +- boolean this_present_reversed = true && this.isSetReversed(); +- boolean that_present_reversed = true && that.isSetReversed(); +- if (this_present_reversed || that_present_reversed) { +- if (!(this_present_reversed && that_present_reversed)) +- return false; +- if (this.reversed != that.reversed) +- return false; +- } +- +- boolean this_present_count = true && this.isSetCount(); +- boolean that_present_count = true && that.isSetCount(); +- if (this_present_count || that_present_count) { +- if (!(this_present_count && that_present_count)) +- return false; +- if (this.count != that.count) +- return false; +- } +- +- boolean this_present_consistency_level = true && this.isSetConsistency_level(); +- boolean that_present_consistency_level = true && that.isSetConsistency_level(); +- if (this_present_consistency_level || that_present_consistency_level) { +- if (!(this_present_consistency_level && that_present_consistency_level)) +- return false; +- if (!this.consistency_level.equals(that.consistency_level)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_key = true && (isSetKey()); +- builder.append(present_key); +- if (present_key) +- builder.append(key); +- +- boolean present_column_parent = true && (isSetColumn_parent()); +- builder.append(present_column_parent); +- if (present_column_parent) +- builder.append(column_parent); +- +- boolean present_column_slices = true && (isSetColumn_slices()); +- builder.append(present_column_slices); +- if (present_column_slices) +- builder.append(column_slices); +- +- boolean present_reversed = true && (isSetReversed()); +- builder.append(present_reversed); +- if (present_reversed) +- builder.append(reversed); +- +- boolean present_count = true && 
(isSetCount()); +- builder.append(present_count); +- if (present_count) +- builder.append(count); +- +- boolean present_consistency_level = true && (isSetConsistency_level()); +- builder.append(present_consistency_level); +- if (present_consistency_level) +- builder.append(consistency_level.getValue()); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(MultiSliceRequest other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetKey()).compareTo(other.isSetKey()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetKey()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key, other.key); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetColumn_parent()).compareTo(other.isSetColumn_parent()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumn_parent()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_parent, other.column_parent); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetColumn_slices()).compareTo(other.isSetColumn_slices()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumn_slices()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_slices, other.column_slices); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetReversed()).compareTo(other.isSetReversed()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetReversed()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.reversed, other.reversed); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = 
Boolean.valueOf(isSetCount()).compareTo(other.isSetCount()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetCount()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.count, other.count); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetConsistency_level()).compareTo(other.isSetConsistency_level()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetConsistency_level()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.consistency_level, other.consistency_level); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("MultiSliceRequest("); +- boolean first = true; +- +- if (isSetKey()) { +- sb.append("key:"); +- if (this.key == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.key, sb); +- } +- first = false; +- } +- if (isSetColumn_parent()) { +- if (!first) sb.append(", "); +- sb.append("column_parent:"); +- if (this.column_parent == null) { +- sb.append("null"); +- } else { +- sb.append(this.column_parent); +- } +- first = false; +- } +- if (isSetColumn_slices()) { +- if (!first) sb.append(", "); +- sb.append("column_slices:"); +- if (this.column_slices == null) { +- sb.append("null"); +- } else { +- sb.append(this.column_slices); +- } +- first = false; +- } +- if (isSetReversed()) { +- if (!first) sb.append(", "); +- 
sb.append("reversed:"); +- sb.append(this.reversed); +- first = false; +- } +- if (isSetCount()) { +- if (!first) sb.append(", "); +- sb.append("count:"); +- sb.append(this.count); +- first = false; +- } +- if (isSetConsistency_level()) { +- if (!first) sb.append(", "); +- sb.append("consistency_level:"); +- if (this.consistency_level == null) { +- sb.append("null"); +- } else { +- sb.append(this.consistency_level); +- } +- first = false; +- } +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- if (column_parent != null) { +- column_parent.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+- __isset_bitfield = 0; +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class MultiSliceRequestStandardSchemeFactory implements SchemeFactory { +- public MultiSliceRequestStandardScheme getScheme() { +- return new MultiSliceRequestStandardScheme(); +- } +- } +- +- private static class MultiSliceRequestStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, MultiSliceRequest struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // KEY +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.key = iprot.readBinary(); +- struct.setKeyIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // COLUMN_PARENT +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.column_parent = new ColumnParent(); +- struct.column_parent.read(iprot); +- struct.setColumn_parentIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // COLUMN_SLICES +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list216 = iprot.readListBegin(); +- struct.column_slices = new ArrayList(_list216.size); +- for (int _i217 = 0; _i217 < _list216.size; ++_i217) +- { +- ColumnSlice _elem218; +- _elem218 = new ColumnSlice(); +- _elem218.read(iprot); +- struct.column_slices.add(_elem218); +- } +- iprot.readListEnd(); +- } +- struct.setColumn_slicesIsSet(true); +- } else { +- 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // REVERSED +- if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { +- struct.reversed = iprot.readBool(); +- struct.setReversedIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 5: // COUNT +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.count = iprot.readI32(); +- struct.setCountIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 6: // CONSISTENCY_LEVEL +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, MultiSliceRequest struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.key != null) { +- if (struct.isSetKey()) { +- oprot.writeFieldBegin(KEY_FIELD_DESC); +- oprot.writeBinary(struct.key); +- oprot.writeFieldEnd(); +- } +- } +- if (struct.column_parent != null) { +- if (struct.isSetColumn_parent()) { +- oprot.writeFieldBegin(COLUMN_PARENT_FIELD_DESC); +- struct.column_parent.write(oprot); +- oprot.writeFieldEnd(); +- } +- } +- if (struct.column_slices != null) { +- if (struct.isSetColumn_slices()) { +- oprot.writeFieldBegin(COLUMN_SLICES_FIELD_DESC); +- { +- oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.column_slices.size())); +- for (ColumnSlice _iter219 : struct.column_slices) +- { +- _iter219.write(oprot); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- } +- if (struct.isSetReversed()) { +- oprot.writeFieldBegin(REVERSED_FIELD_DESC); +- oprot.writeBool(struct.reversed); +- oprot.writeFieldEnd(); +- } +- if (struct.isSetCount()) { +- oprot.writeFieldBegin(COUNT_FIELD_DESC); +- oprot.writeI32(struct.count); +- oprot.writeFieldEnd(); +- } +- if (struct.consistency_level != null) { +- if (struct.isSetConsistency_level()) { +- oprot.writeFieldBegin(CONSISTENCY_LEVEL_FIELD_DESC); +- oprot.writeI32(struct.consistency_level.getValue()); +- oprot.writeFieldEnd(); +- } +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class MultiSliceRequestTupleSchemeFactory implements SchemeFactory { +- public MultiSliceRequestTupleScheme getScheme() { +- return new MultiSliceRequestTupleScheme(); +- } +- } +- +- private static class MultiSliceRequestTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, MultiSliceRequest struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetKey()) { +- optionals.set(0); +- } +- if (struct.isSetColumn_parent()) { +- optionals.set(1); +- } +- if (struct.isSetColumn_slices()) { +- optionals.set(2); +- } +- if (struct.isSetReversed()) { +- optionals.set(3); +- } +- if (struct.isSetCount()) { +- optionals.set(4); +- } +- if (struct.isSetConsistency_level()) { +- optionals.set(5); +- } +- oprot.writeBitSet(optionals, 6); +- if (struct.isSetKey()) { +- oprot.writeBinary(struct.key); +- } +- if (struct.isSetColumn_parent()) { +- struct.column_parent.write(oprot); +- } +- if (struct.isSetColumn_slices()) { +- { +- oprot.writeI32(struct.column_slices.size()); +- for 
(ColumnSlice _iter220 : struct.column_slices) +- { +- _iter220.write(oprot); +- } +- } +- } +- if (struct.isSetReversed()) { +- oprot.writeBool(struct.reversed); +- } +- if (struct.isSetCount()) { +- oprot.writeI32(struct.count); +- } +- if (struct.isSetConsistency_level()) { +- oprot.writeI32(struct.consistency_level.getValue()); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, MultiSliceRequest struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(6); +- if (incoming.get(0)) { +- struct.key = iprot.readBinary(); +- struct.setKeyIsSet(true); +- } +- if (incoming.get(1)) { +- struct.column_parent = new ColumnParent(); +- struct.column_parent.read(iprot); +- struct.setColumn_parentIsSet(true); +- } +- if (incoming.get(2)) { +- { +- org.apache.thrift.protocol.TList _list221 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); +- struct.column_slices = new ArrayList(_list221.size); +- for (int _i222 = 0; _i222 < _list221.size; ++_i222) +- { +- ColumnSlice _elem223; +- _elem223 = new ColumnSlice(); +- _elem223.read(iprot); +- struct.column_slices.add(_elem223); +- } +- } +- struct.setColumn_slicesIsSet(true); +- } +- if (incoming.get(3)) { +- struct.reversed = iprot.readBool(); +- struct.setReversedIsSet(true); +- } +- if (incoming.get(4)) { +- struct.count = iprot.readI32(); +- struct.setCountIsSet(true); +- } +- if (incoming.get(5)) { +- struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); +- struct.setConsistency_levelIsSet(true); +- } +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/Mutation.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/Mutation.java +deleted file mode 100644 +index 981d5a4..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/Mutation.java ++++ /dev/null +@@ -1,537 +0,0 @@ +-/** +- * Autogenerated 
by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. +- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * A Mutation is 
either an insert (represented by filling column_or_supercolumn) or a deletion (represented by filling the deletion attribute). +- * @param column_or_supercolumn. An insert to a column or supercolumn (possibly counter column or supercolumn) +- * @param deletion. A deletion of a column or supercolumn +- */ +-public class Mutation implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Mutation"); +- +- private static final org.apache.thrift.protocol.TField COLUMN_OR_SUPERCOLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("column_or_supercolumn", org.apache.thrift.protocol.TType.STRUCT, (short)1); +- private static final org.apache.thrift.protocol.TField DELETION_FIELD_DESC = new org.apache.thrift.protocol.TField("deletion", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new MutationStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new MutationTupleSchemeFactory()); +- } +- +- public ColumnOrSuperColumn column_or_supercolumn; // optional +- public Deletion deletion; // optional +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- COLUMN_OR_SUPERCOLUMN((short)1, "column_or_supercolumn"), +- DELETION((short)2, "deletion"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. 
+- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // COLUMN_OR_SUPERCOLUMN +- return COLUMN_OR_SUPERCOLUMN; +- case 2: // DELETION +- return DELETION; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private _Fields optionals[] = {_Fields.COLUMN_OR_SUPERCOLUMN,_Fields.DELETION}; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.COLUMN_OR_SUPERCOLUMN, new org.apache.thrift.meta_data.FieldMetaData("column_or_supercolumn", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnOrSuperColumn.class))); +- tmpMap.put(_Fields.DELETION, new org.apache.thrift.meta_data.FieldMetaData("deletion", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Deletion.class))); +- metaDataMap = 
Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Mutation.class, metaDataMap); +- } +- +- public Mutation() { +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public Mutation(Mutation other) { +- if (other.isSetColumn_or_supercolumn()) { +- this.column_or_supercolumn = new ColumnOrSuperColumn(other.column_or_supercolumn); +- } +- if (other.isSetDeletion()) { +- this.deletion = new Deletion(other.deletion); +- } +- } +- +- public Mutation deepCopy() { +- return new Mutation(this); +- } +- +- @Override +- public void clear() { +- this.column_or_supercolumn = null; +- this.deletion = null; +- } +- +- public ColumnOrSuperColumn getColumn_or_supercolumn() { +- return this.column_or_supercolumn; +- } +- +- public Mutation setColumn_or_supercolumn(ColumnOrSuperColumn column_or_supercolumn) { +- this.column_or_supercolumn = column_or_supercolumn; +- return this; +- } +- +- public void unsetColumn_or_supercolumn() { +- this.column_or_supercolumn = null; +- } +- +- /** Returns true if field column_or_supercolumn is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumn_or_supercolumn() { +- return this.column_or_supercolumn != null; +- } +- +- public void setColumn_or_supercolumnIsSet(boolean value) { +- if (!value) { +- this.column_or_supercolumn = null; +- } +- } +- +- public Deletion getDeletion() { +- return this.deletion; +- } +- +- public Mutation setDeletion(Deletion deletion) { +- this.deletion = deletion; +- return this; +- } +- +- public void unsetDeletion() { +- this.deletion = null; +- } +- +- /** Returns true if field deletion is set (has been assigned a value) and false otherwise */ +- public boolean isSetDeletion() { +- return this.deletion != null; +- } +- +- public void setDeletionIsSet(boolean value) { +- if (!value) { +- this.deletion = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case 
COLUMN_OR_SUPERCOLUMN: +- if (value == null) { +- unsetColumn_or_supercolumn(); +- } else { +- setColumn_or_supercolumn((ColumnOrSuperColumn)value); +- } +- break; +- +- case DELETION: +- if (value == null) { +- unsetDeletion(); +- } else { +- setDeletion((Deletion)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case COLUMN_OR_SUPERCOLUMN: +- return getColumn_or_supercolumn(); +- +- case DELETION: +- return getDeletion(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case COLUMN_OR_SUPERCOLUMN: +- return isSetColumn_or_supercolumn(); +- case DELETION: +- return isSetDeletion(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof Mutation) +- return this.equals((Mutation)that); +- return false; +- } +- +- public boolean equals(Mutation that) { +- if (that == null) +- return false; +- +- boolean this_present_column_or_supercolumn = true && this.isSetColumn_or_supercolumn(); +- boolean that_present_column_or_supercolumn = true && that.isSetColumn_or_supercolumn(); +- if (this_present_column_or_supercolumn || that_present_column_or_supercolumn) { +- if (!(this_present_column_or_supercolumn && that_present_column_or_supercolumn)) +- return false; +- if (!this.column_or_supercolumn.equals(that.column_or_supercolumn)) +- return false; +- } +- +- boolean this_present_deletion = true && this.isSetDeletion(); +- boolean that_present_deletion = true && that.isSetDeletion(); +- if (this_present_deletion || that_present_deletion) { +- if (!(this_present_deletion && that_present_deletion)) +- return false; +- if (!this.deletion.equals(that.deletion)) +- 
return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_column_or_supercolumn = true && (isSetColumn_or_supercolumn()); +- builder.append(present_column_or_supercolumn); +- if (present_column_or_supercolumn) +- builder.append(column_or_supercolumn); +- +- boolean present_deletion = true && (isSetDeletion()); +- builder.append(present_deletion); +- if (present_deletion) +- builder.append(deletion); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(Mutation other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetColumn_or_supercolumn()).compareTo(other.isSetColumn_or_supercolumn()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumn_or_supercolumn()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_or_supercolumn, other.column_or_supercolumn); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetDeletion()).compareTo(other.isSetDeletion()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetDeletion()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.deletion, other.deletion); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { 
+- StringBuilder sb = new StringBuilder("Mutation("); +- boolean first = true; +- +- if (isSetColumn_or_supercolumn()) { +- sb.append("column_or_supercolumn:"); +- if (this.column_or_supercolumn == null) { +- sb.append("null"); +- } else { +- sb.append(this.column_or_supercolumn); +- } +- first = false; +- } +- if (isSetDeletion()) { +- if (!first) sb.append(", "); +- sb.append("deletion:"); +- if (this.deletion == null) { +- sb.append("null"); +- } else { +- sb.append(this.deletion); +- } +- first = false; +- } +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- if (column_or_supercolumn != null) { +- column_or_supercolumn.validate(); +- } +- if (deletion != null) { +- deletion.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class MutationStandardSchemeFactory implements SchemeFactory { +- public MutationStandardScheme getScheme() { +- return new MutationStandardScheme(); +- } +- } +- +- private static class MutationStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, Mutation struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = 
iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // COLUMN_OR_SUPERCOLUMN +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.column_or_supercolumn = new ColumnOrSuperColumn(); +- struct.column_or_supercolumn.read(iprot); +- struct.setColumn_or_supercolumnIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // DELETION +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.deletion = new Deletion(); +- struct.deletion.read(iprot); +- struct.setDeletionIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, Mutation struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.column_or_supercolumn != null) { +- if (struct.isSetColumn_or_supercolumn()) { +- oprot.writeFieldBegin(COLUMN_OR_SUPERCOLUMN_FIELD_DESC); +- struct.column_or_supercolumn.write(oprot); +- oprot.writeFieldEnd(); +- } +- } +- if (struct.deletion != null) { +- if (struct.isSetDeletion()) { +- oprot.writeFieldBegin(DELETION_FIELD_DESC); +- struct.deletion.write(oprot); +- oprot.writeFieldEnd(); +- } +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class MutationTupleSchemeFactory implements SchemeFactory { +- public MutationTupleScheme getScheme() { +- return new MutationTupleScheme(); +- } +- } +- +- private static class MutationTupleScheme extends TupleScheme { +- +- @Override 
+- public void write(org.apache.thrift.protocol.TProtocol prot, Mutation struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetColumn_or_supercolumn()) { +- optionals.set(0); +- } +- if (struct.isSetDeletion()) { +- optionals.set(1); +- } +- oprot.writeBitSet(optionals, 2); +- if (struct.isSetColumn_or_supercolumn()) { +- struct.column_or_supercolumn.write(oprot); +- } +- if (struct.isSetDeletion()) { +- struct.deletion.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, Mutation struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(2); +- if (incoming.get(0)) { +- struct.column_or_supercolumn = new ColumnOrSuperColumn(); +- struct.column_or_supercolumn.read(iprot); +- struct.setColumn_or_supercolumnIsSet(true); +- } +- if (incoming.get(1)) { +- struct.deletion = new Deletion(); +- struct.deletion.read(iprot); +- struct.setDeletionIsSet(true); +- } +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/NotFoundException.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/NotFoundException.java +deleted file mode 100644 +index 0bd8cee..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/NotFoundException.java ++++ /dev/null +@@ -1,307 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. 
The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. +- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * A specific column was requested that does not exist. 
+- */ +-public class NotFoundException extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NotFoundException"); +- +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new NotFoundExceptionStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new NotFoundExceptionTupleSchemeFactory()); +- } +- +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +-; +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(NotFoundException.class, metaDataMap); +- } +- +- public NotFoundException() { +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public NotFoundException(NotFoundException other) { +- } +- +- public NotFoundException deepCopy() { +- return new NotFoundException(this); +- } +- +- @Override +- public void clear() { +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof NotFoundException) +- return this.equals((NotFoundException)that); +- return false; +- } +- +- public boolean equals(NotFoundException that) { +- if (that == null) +- return false; +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder 
builder = new HashCodeBuilder(); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(NotFoundException other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("NotFoundException("); +- boolean first = true; +- +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class NotFoundExceptionStandardSchemeFactory implements SchemeFactory { +- public NotFoundExceptionStandardScheme getScheme() { +- return new NotFoundExceptionStandardScheme(); +- } +- } +- +- private static class NotFoundExceptionStandardScheme 
extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, NotFoundException struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, NotFoundException struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class NotFoundExceptionTupleSchemeFactory implements SchemeFactory { +- public NotFoundExceptionTupleScheme getScheme() { +- return new NotFoundExceptionTupleScheme(); +- } +- } +- +- private static class NotFoundExceptionTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, NotFoundException struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, NotFoundException struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/SchemaDisagreementException.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/SchemaDisagreementException.java +deleted file mode 100644 +index 003b822..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/SchemaDisagreementException.java ++++ /dev/null +@@ -1,310 +0,0 
@@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. 
+- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * NOTE: This up outdated exception left for backward compatibility reasons, +- * no actual schema agreement validation is done starting from Cassandra 1.2 +- * +- * schemas are not in agreement across all nodes +- */ +-public class SchemaDisagreementException extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SchemaDisagreementException"); +- +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new SchemaDisagreementExceptionStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new SchemaDisagreementExceptionTupleSchemeFactory()); +- } +- +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +-; +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SchemaDisagreementException.class, metaDataMap); +- } +- +- public SchemaDisagreementException() { +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public SchemaDisagreementException(SchemaDisagreementException other) { +- } +- +- public SchemaDisagreementException deepCopy() { +- return new SchemaDisagreementException(this); +- } +- +- @Override +- public void clear() { +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof SchemaDisagreementException) +- return this.equals((SchemaDisagreementException)that); +- return false; +- } +- +- public boolean equals(SchemaDisagreementException that) { +- if (that == null) +- return false; +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(SchemaDisagreementException other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb 
= new StringBuilder("SchemaDisagreementException("); +- boolean first = true; +- +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class SchemaDisagreementExceptionStandardSchemeFactory implements SchemeFactory { +- public SchemaDisagreementExceptionStandardScheme getScheme() { +- return new SchemaDisagreementExceptionStandardScheme(); +- } +- } +- +- private static class SchemaDisagreementExceptionStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, SchemaDisagreementException struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, 
SchemaDisagreementException struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class SchemaDisagreementExceptionTupleSchemeFactory implements SchemeFactory { +- public SchemaDisagreementExceptionTupleScheme getScheme() { +- return new SchemaDisagreementExceptionTupleScheme(); +- } +- } +- +- private static class SchemaDisagreementExceptionTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, SchemaDisagreementException struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, SchemaDisagreementException struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/SlicePredicate.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/SlicePredicate.java +deleted file mode 100644 +index 9d46680..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/SlicePredicate.java ++++ /dev/null +@@ -1,588 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. 
You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. +- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * A SlicePredicate is similar to a mathematic predicate (see http://en.wikipedia.org/wiki/Predicate_(mathematical_logic)), +- * which is described as "a property that the elements of a set have in common." +- * +- * SlicePredicate's in Cassandra are described with either a list of column_names or a SliceRange. If column_names is +- * specified, slice_range is ignored. +- * +- * @param column_name. A list of column names to retrieve. This can be used similar to Memcached's "multi-get" feature +- * to fetch N known column names. 
For instance, if you know you wish to fetch columns 'Joe', 'Jack', +- * and 'Jim' you can pass those column names as a list to fetch all three at once. +- * @param slice_range. A SliceRange describing how to range, order, and/or limit the slice. +- */ +-public class SlicePredicate implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SlicePredicate"); +- +- private static final org.apache.thrift.protocol.TField COLUMN_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("column_names", org.apache.thrift.protocol.TType.LIST, (short)1); +- private static final org.apache.thrift.protocol.TField SLICE_RANGE_FIELD_DESC = new org.apache.thrift.protocol.TField("slice_range", org.apache.thrift.protocol.TType.STRUCT, (short)2); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new SlicePredicateStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new SlicePredicateTupleSchemeFactory()); +- } +- +- public List column_names; // optional +- public SliceRange slice_range; // optional +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- COLUMN_NAMES((short)1, "column_names"), +- SLICE_RANGE((short)2, "slice_range"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. 
+- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // COLUMN_NAMES +- return COLUMN_NAMES; +- case 2: // SLICE_RANGE +- return SLICE_RANGE; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private _Fields optionals[] = {_Fields.COLUMN_NAMES,_Fields.SLICE_RANGE}; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.COLUMN_NAMES, new org.apache.thrift.meta_data.FieldMetaData("column_names", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true)))); +- tmpMap.put(_Fields.SLICE_RANGE, new org.apache.thrift.meta_data.FieldMetaData("slice_range", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, 
SliceRange.class))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SlicePredicate.class, metaDataMap); +- } +- +- public SlicePredicate() { +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public SlicePredicate(SlicePredicate other) { +- if (other.isSetColumn_names()) { +- List __this__column_names = new ArrayList(other.column_names); +- this.column_names = __this__column_names; +- } +- if (other.isSetSlice_range()) { +- this.slice_range = new SliceRange(other.slice_range); +- } +- } +- +- public SlicePredicate deepCopy() { +- return new SlicePredicate(this); +- } +- +- @Override +- public void clear() { +- this.column_names = null; +- this.slice_range = null; +- } +- +- public int getColumn_namesSize() { +- return (this.column_names == null) ? 0 : this.column_names.size(); +- } +- +- public java.util.Iterator getColumn_namesIterator() { +- return (this.column_names == null) ? null : this.column_names.iterator(); +- } +- +- public void addToColumn_names(ByteBuffer elem) { +- if (this.column_names == null) { +- this.column_names = new ArrayList(); +- } +- this.column_names.add(elem); +- } +- +- public List getColumn_names() { +- return this.column_names; +- } +- +- public SlicePredicate setColumn_names(List column_names) { +- this.column_names = column_names; +- return this; +- } +- +- public void unsetColumn_names() { +- this.column_names = null; +- } +- +- /** Returns true if field column_names is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumn_names() { +- return this.column_names != null; +- } +- +- public void setColumn_namesIsSet(boolean value) { +- if (!value) { +- this.column_names = null; +- } +- } +- +- public SliceRange getSlice_range() { +- return this.slice_range; +- } +- +- public SlicePredicate setSlice_range(SliceRange slice_range) { +- this.slice_range = slice_range; +- return this; +- } +- +- public void unsetSlice_range() { +- 
this.slice_range = null; +- } +- +- /** Returns true if field slice_range is set (has been assigned a value) and false otherwise */ +- public boolean isSetSlice_range() { +- return this.slice_range != null; +- } +- +- public void setSlice_rangeIsSet(boolean value) { +- if (!value) { +- this.slice_range = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case COLUMN_NAMES: +- if (value == null) { +- unsetColumn_names(); +- } else { +- setColumn_names((List)value); +- } +- break; +- +- case SLICE_RANGE: +- if (value == null) { +- unsetSlice_range(); +- } else { +- setSlice_range((SliceRange)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case COLUMN_NAMES: +- return getColumn_names(); +- +- case SLICE_RANGE: +- return getSlice_range(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case COLUMN_NAMES: +- return isSetColumn_names(); +- case SLICE_RANGE: +- return isSetSlice_range(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof SlicePredicate) +- return this.equals((SlicePredicate)that); +- return false; +- } +- +- public boolean equals(SlicePredicate that) { +- if (that == null) +- return false; +- +- boolean this_present_column_names = true && this.isSetColumn_names(); +- boolean that_present_column_names = true && that.isSetColumn_names(); +- if (this_present_column_names || that_present_column_names) { +- if (!(this_present_column_names && that_present_column_names)) +- return false; +- if (!this.column_names.equals(that.column_names)) +- return false; +- } +- +- boolean 
this_present_slice_range = true && this.isSetSlice_range(); +- boolean that_present_slice_range = true && that.isSetSlice_range(); +- if (this_present_slice_range || that_present_slice_range) { +- if (!(this_present_slice_range && that_present_slice_range)) +- return false; +- if (!this.slice_range.equals(that.slice_range)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_column_names = true && (isSetColumn_names()); +- builder.append(present_column_names); +- if (present_column_names) +- builder.append(column_names); +- +- boolean present_slice_range = true && (isSetSlice_range()); +- builder.append(present_slice_range); +- if (present_slice_range) +- builder.append(slice_range); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(SlicePredicate other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetColumn_names()).compareTo(other.isSetColumn_names()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumn_names()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_names, other.column_names); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetSlice_range()).compareTo(other.isSetSlice_range()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetSlice_range()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.slice_range, other.slice_range); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- 
schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("SlicePredicate("); +- boolean first = true; +- +- if (isSetColumn_names()) { +- sb.append("column_names:"); +- if (this.column_names == null) { +- sb.append("null"); +- } else { +- sb.append(this.column_names); +- } +- first = false; +- } +- if (isSetSlice_range()) { +- if (!first) sb.append(", "); +- sb.append("slice_range:"); +- if (this.slice_range == null) { +- sb.append("null"); +- } else { +- sb.append(this.slice_range); +- } +- first = false; +- } +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- if (slice_range != null) { +- slice_range.validate(); +- } +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class SlicePredicateStandardSchemeFactory implements SchemeFactory { +- public SlicePredicateStandardScheme getScheme() { +- return new SlicePredicateStandardScheme(); +- } +- } +- +- private static class SlicePredicateStandardScheme extends StandardScheme { +- +- public void 
read(org.apache.thrift.protocol.TProtocol iprot, SlicePredicate struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // COLUMN_NAMES +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list16 = iprot.readListBegin(); +- struct.column_names = new ArrayList(_list16.size); +- for (int _i17 = 0; _i17 < _list16.size; ++_i17) +- { +- ByteBuffer _elem18; +- _elem18 = iprot.readBinary(); +- struct.column_names.add(_elem18); +- } +- iprot.readListEnd(); +- } +- struct.setColumn_namesIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // SLICE_RANGE +- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { +- struct.slice_range = new SliceRange(); +- struct.slice_range.read(iprot); +- struct.setSlice_rangeIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, SlicePredicate struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.column_names != null) { +- if (struct.isSetColumn_names()) { +- oprot.writeFieldBegin(COLUMN_NAMES_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.column_names.size())); +- for (ByteBuffer _iter19 : 
struct.column_names) +- { +- oprot.writeBinary(_iter19); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- } +- if (struct.slice_range != null) { +- if (struct.isSetSlice_range()) { +- oprot.writeFieldBegin(SLICE_RANGE_FIELD_DESC); +- struct.slice_range.write(oprot); +- oprot.writeFieldEnd(); +- } +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class SlicePredicateTupleSchemeFactory implements SchemeFactory { +- public SlicePredicateTupleScheme getScheme() { +- return new SlicePredicateTupleScheme(); +- } +- } +- +- private static class SlicePredicateTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, SlicePredicate struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetColumn_names()) { +- optionals.set(0); +- } +- if (struct.isSetSlice_range()) { +- optionals.set(1); +- } +- oprot.writeBitSet(optionals, 2); +- if (struct.isSetColumn_names()) { +- { +- oprot.writeI32(struct.column_names.size()); +- for (ByteBuffer _iter20 : struct.column_names) +- { +- oprot.writeBinary(_iter20); +- } +- } +- } +- if (struct.isSetSlice_range()) { +- struct.slice_range.write(oprot); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, SlicePredicate struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(2); +- if (incoming.get(0)) { +- { +- org.apache.thrift.protocol.TList _list21 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); +- struct.column_names = new ArrayList(_list21.size); +- for (int _i22 = 0; _i22 < _list21.size; ++_i22) +- { +- ByteBuffer _elem23; +- _elem23 = iprot.readBinary(); +- struct.column_names.add(_elem23); +- } +- } +- struct.setColumn_namesIsSet(true); +- } +- if (incoming.get(1)) { 
+- struct.slice_range = new SliceRange(); +- struct.slice_range.read(iprot); +- struct.setSlice_rangeIsSet(true); +- } +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/SliceRange.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/SliceRange.java +deleted file mode 100644 +index 4b96c86..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/SliceRange.java ++++ /dev/null +@@ -1,749 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. 
+- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * A slice range is a structure that stores basic range, ordering and limit information for a query that will return +- * multiple columns. It could be thought of as Cassandra's version of LIMIT and ORDER BY +- * +- * @param start. The column name to start the slice with. This attribute is not required, though there is no default value, +- * and can be safely set to '', i.e., an empty byte array, to start with the first column name. Otherwise, it +- * must a valid value under the rules of the Comparator defined for the given ColumnFamily. +- * @param finish. The column name to stop the slice at. This attribute is not required, though there is no default value, +- * and can be safely set to an empty byte array to not stop until 'count' results are seen. Otherwise, it +- * must also be a valid value to the ColumnFamily Comparator. +- * @param reversed. Whether the results should be ordered in reversed order. Similar to ORDER BY blah DESC in SQL. +- * @param count. How many columns to return. 
Similar to LIMIT in SQL. May be arbitrarily large, but Thrift will +- * materialize the whole result into memory before returning it to the client, so be aware that you may +- * be better served by iterating through slices by passing the last value of one call in as the 'start' +- * of the next instead of increasing 'count' arbitrarily large. +- */ +-public class SliceRange implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SliceRange"); +- +- private static final org.apache.thrift.protocol.TField START_FIELD_DESC = new org.apache.thrift.protocol.TField("start", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField FINISH_FIELD_DESC = new org.apache.thrift.protocol.TField("finish", org.apache.thrift.protocol.TType.STRING, (short)2); +- private static final org.apache.thrift.protocol.TField REVERSED_FIELD_DESC = new org.apache.thrift.protocol.TField("reversed", org.apache.thrift.protocol.TType.BOOL, (short)3); +- private static final org.apache.thrift.protocol.TField COUNT_FIELD_DESC = new org.apache.thrift.protocol.TField("count", org.apache.thrift.protocol.TType.I32, (short)4); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new SliceRangeStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new SliceRangeTupleSchemeFactory()); +- } +- +- public ByteBuffer start; // required +- public ByteBuffer finish; // required +- public boolean reversed; // required +- public int count; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- START((short)1, "start"), +- FINISH((short)2, "finish"), +- REVERSED((short)3, "reversed"), +- COUNT((short)4, "count"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // START +- return START; +- case 2: // FINISH +- return FINISH; +- case 3: // REVERSED +- return REVERSED; +- case 4: // COUNT +- return COUNT; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private static final int __REVERSED_ISSET_ID = 0; +- private static final int __COUNT_ISSET_ID = 1; +- private byte __isset_bitfield = 0; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.START, new org.apache.thrift.meta_data.FieldMetaData("start", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.FINISH, new org.apache.thrift.meta_data.FieldMetaData("finish", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.REVERSED, new org.apache.thrift.meta_data.FieldMetaData("reversed", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); +- tmpMap.put(_Fields.COUNT, new org.apache.thrift.meta_data.FieldMetaData("count", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SliceRange.class, metaDataMap); +- } +- +- public SliceRange() { +- this.reversed = false; +- +- 
this.count = 100; +- +- } +- +- public SliceRange( +- ByteBuffer start, +- ByteBuffer finish, +- boolean reversed, +- int count) +- { +- this(); +- this.start = start; +- this.finish = finish; +- this.reversed = reversed; +- setReversedIsSet(true); +- this.count = count; +- setCountIsSet(true); +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public SliceRange(SliceRange other) { +- __isset_bitfield = other.__isset_bitfield; +- if (other.isSetStart()) { +- this.start = org.apache.thrift.TBaseHelper.copyBinary(other.start); +-; +- } +- if (other.isSetFinish()) { +- this.finish = org.apache.thrift.TBaseHelper.copyBinary(other.finish); +-; +- } +- this.reversed = other.reversed; +- this.count = other.count; +- } +- +- public SliceRange deepCopy() { +- return new SliceRange(this); +- } +- +- @Override +- public void clear() { +- this.start = null; +- this.finish = null; +- this.reversed = false; +- +- this.count = 100; +- +- } +- +- public byte[] getStart() { +- setStart(org.apache.thrift.TBaseHelper.rightSize(start)); +- return start == null ? null : start.array(); +- } +- +- public ByteBuffer bufferForStart() { +- return start; +- } +- +- public SliceRange setStart(byte[] start) { +- setStart(start == null ? (ByteBuffer)null : ByteBuffer.wrap(start)); +- return this; +- } +- +- public SliceRange setStart(ByteBuffer start) { +- this.start = start; +- return this; +- } +- +- public void unsetStart() { +- this.start = null; +- } +- +- /** Returns true if field start is set (has been assigned a value) and false otherwise */ +- public boolean isSetStart() { +- return this.start != null; +- } +- +- public void setStartIsSet(boolean value) { +- if (!value) { +- this.start = null; +- } +- } +- +- public byte[] getFinish() { +- setFinish(org.apache.thrift.TBaseHelper.rightSize(finish)); +- return finish == null ? 
null : finish.array(); +- } +- +- public ByteBuffer bufferForFinish() { +- return finish; +- } +- +- public SliceRange setFinish(byte[] finish) { +- setFinish(finish == null ? (ByteBuffer)null : ByteBuffer.wrap(finish)); +- return this; +- } +- +- public SliceRange setFinish(ByteBuffer finish) { +- this.finish = finish; +- return this; +- } +- +- public void unsetFinish() { +- this.finish = null; +- } +- +- /** Returns true if field finish is set (has been assigned a value) and false otherwise */ +- public boolean isSetFinish() { +- return this.finish != null; +- } +- +- public void setFinishIsSet(boolean value) { +- if (!value) { +- this.finish = null; +- } +- } +- +- public boolean isReversed() { +- return this.reversed; +- } +- +- public SliceRange setReversed(boolean reversed) { +- this.reversed = reversed; +- setReversedIsSet(true); +- return this; +- } +- +- public void unsetReversed() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __REVERSED_ISSET_ID); +- } +- +- /** Returns true if field reversed is set (has been assigned a value) and false otherwise */ +- public boolean isSetReversed() { +- return EncodingUtils.testBit(__isset_bitfield, __REVERSED_ISSET_ID); +- } +- +- public void setReversedIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __REVERSED_ISSET_ID, value); +- } +- +- public int getCount() { +- return this.count; +- } +- +- public SliceRange setCount(int count) { +- this.count = count; +- setCountIsSet(true); +- return this; +- } +- +- public void unsetCount() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __COUNT_ISSET_ID); +- } +- +- /** Returns true if field count is set (has been assigned a value) and false otherwise */ +- public boolean isSetCount() { +- return EncodingUtils.testBit(__isset_bitfield, __COUNT_ISSET_ID); +- } +- +- public void setCountIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __COUNT_ISSET_ID, value); +- } +- +- 
public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case START: +- if (value == null) { +- unsetStart(); +- } else { +- setStart((ByteBuffer)value); +- } +- break; +- +- case FINISH: +- if (value == null) { +- unsetFinish(); +- } else { +- setFinish((ByteBuffer)value); +- } +- break; +- +- case REVERSED: +- if (value == null) { +- unsetReversed(); +- } else { +- setReversed((Boolean)value); +- } +- break; +- +- case COUNT: +- if (value == null) { +- unsetCount(); +- } else { +- setCount((Integer)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case START: +- return getStart(); +- +- case FINISH: +- return getFinish(); +- +- case REVERSED: +- return Boolean.valueOf(isReversed()); +- +- case COUNT: +- return Integer.valueOf(getCount()); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case START: +- return isSetStart(); +- case FINISH: +- return isSetFinish(); +- case REVERSED: +- return isSetReversed(); +- case COUNT: +- return isSetCount(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof SliceRange) +- return this.equals((SliceRange)that); +- return false; +- } +- +- public boolean equals(SliceRange that) { +- if (that == null) +- return false; +- +- boolean this_present_start = true && this.isSetStart(); +- boolean that_present_start = true && that.isSetStart(); +- if (this_present_start || that_present_start) { +- if (!(this_present_start && that_present_start)) +- return false; +- if (!this.start.equals(that.start)) +- return false; +- } +- +- boolean this_present_finish = true && this.isSetFinish(); +- boolean 
that_present_finish = true && that.isSetFinish(); +- if (this_present_finish || that_present_finish) { +- if (!(this_present_finish && that_present_finish)) +- return false; +- if (!this.finish.equals(that.finish)) +- return false; +- } +- +- boolean this_present_reversed = true; +- boolean that_present_reversed = true; +- if (this_present_reversed || that_present_reversed) { +- if (!(this_present_reversed && that_present_reversed)) +- return false; +- if (this.reversed != that.reversed) +- return false; +- } +- +- boolean this_present_count = true; +- boolean that_present_count = true; +- if (this_present_count || that_present_count) { +- if (!(this_present_count && that_present_count)) +- return false; +- if (this.count != that.count) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_start = true && (isSetStart()); +- builder.append(present_start); +- if (present_start) +- builder.append(start); +- +- boolean present_finish = true && (isSetFinish()); +- builder.append(present_finish); +- if (present_finish) +- builder.append(finish); +- +- boolean present_reversed = true; +- builder.append(present_reversed); +- if (present_reversed) +- builder.append(reversed); +- +- boolean present_count = true; +- builder.append(present_count); +- if (present_count) +- builder.append(count); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(SliceRange other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetStart()).compareTo(other.isSetStart()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetStart()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.start, other.start); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- 
lastComparison = Boolean.valueOf(isSetFinish()).compareTo(other.isSetFinish()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetFinish()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.finish, other.finish); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetReversed()).compareTo(other.isSetReversed()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetReversed()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.reversed, other.reversed); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetCount()).compareTo(other.isSetCount()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetCount()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.count, other.count); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("SliceRange("); +- boolean first = true; +- +- sb.append("start:"); +- if (this.start == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.start, sb); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("finish:"); +- if (this.finish == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.finish, sb); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("reversed:"); +- 
sb.append(this.reversed); +- first = false; +- if (!first) sb.append(", "); +- sb.append("count:"); +- sb.append(this.count); +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (start == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'start' was not present! Struct: " + toString()); +- } +- if (finish == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'finish' was not present! Struct: " + toString()); +- } +- // alas, we cannot check 'reversed' because it's a primitive and you chose the non-beans generator. +- // alas, we cannot check 'count' because it's a primitive and you chose the non-beans generator. +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+- __isset_bitfield = 0; +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class SliceRangeStandardSchemeFactory implements SchemeFactory { +- public SliceRangeStandardScheme getScheme() { +- return new SliceRangeStandardScheme(); +- } +- } +- +- private static class SliceRangeStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, SliceRange struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // START +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.start = iprot.readBinary(); +- struct.setStartIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // FINISH +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.finish = iprot.readBinary(); +- struct.setFinishIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // REVERSED +- if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { +- struct.reversed = iprot.readBool(); +- struct.setReversedIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // COUNT +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- struct.count = iprot.readI32(); +- struct.setCountIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- if (!struct.isSetReversed()) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'reversed' was not found in serialized data! Struct: " + toString()); +- } +- if (!struct.isSetCount()) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'count' was not found in serialized data! Struct: " + toString()); +- } +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, SliceRange struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.start != null) { +- oprot.writeFieldBegin(START_FIELD_DESC); +- oprot.writeBinary(struct.start); +- oprot.writeFieldEnd(); +- } +- if (struct.finish != null) { +- oprot.writeFieldBegin(FINISH_FIELD_DESC); +- oprot.writeBinary(struct.finish); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldBegin(REVERSED_FIELD_DESC); +- oprot.writeBool(struct.reversed); +- oprot.writeFieldEnd(); +- oprot.writeFieldBegin(COUNT_FIELD_DESC); +- oprot.writeI32(struct.count); +- oprot.writeFieldEnd(); +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class SliceRangeTupleSchemeFactory implements SchemeFactory { +- public SliceRangeTupleScheme getScheme() { +- return new SliceRangeTupleScheme(); +- } +- } +- +- private static class SliceRangeTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, SliceRange struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeBinary(struct.start); +- oprot.writeBinary(struct.finish); +- oprot.writeBool(struct.reversed); +- oprot.writeI32(struct.count); +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, SliceRange 
struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.start = iprot.readBinary(); +- struct.setStartIsSet(true); +- struct.finish = iprot.readBinary(); +- struct.setFinishIsSet(true); +- struct.reversed = iprot.readBool(); +- struct.setReversedIsSet(true); +- struct.count = iprot.readI32(); +- struct.setCountIsSet(true); +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/SuperColumn.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/SuperColumn.java +deleted file mode 100644 +index 37215bf..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/SuperColumn.java ++++ /dev/null +@@ -1,582 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. 
+- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * A named list of columns. +- * @param name. see Column.name. +- * @param columns. A collection of standard Columns. The columns within a super column are defined in an adhoc manner. +- * Columns within a super column do not have to have matching structures (similarly named child columns). 
+- */ +-public class SuperColumn implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SuperColumn"); +- +- private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)2); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new SuperColumnStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new SuperColumnTupleSchemeFactory()); +- } +- +- public ByteBuffer name; // required +- public List columns; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- NAME((short)1, "name"), +- COLUMNS((short)2, "columns"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // NAME +- return NAME; +- case 2: // COLUMNS +- return COLUMNS; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. 
+- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); +- tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Column.class)))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SuperColumn.class, metaDataMap); +- } +- +- public SuperColumn() { +- } +- +- public SuperColumn( +- ByteBuffer name, +- List columns) +- { +- this(); +- this.name = name; +- this.columns = columns; +- } +- +- /** +- * Performs a deep copy on other. 
+- */ +- public SuperColumn(SuperColumn other) { +- if (other.isSetName()) { +- this.name = org.apache.thrift.TBaseHelper.copyBinary(other.name); +-; +- } +- if (other.isSetColumns()) { +- List __this__columns = new ArrayList(other.columns.size()); +- for (Column other_element : other.columns) { +- __this__columns.add(new Column(other_element)); +- } +- this.columns = __this__columns; +- } +- } +- +- public SuperColumn deepCopy() { +- return new SuperColumn(this); +- } +- +- @Override +- public void clear() { +- this.name = null; +- this.columns = null; +- } +- +- public byte[] getName() { +- setName(org.apache.thrift.TBaseHelper.rightSize(name)); +- return name == null ? null : name.array(); +- } +- +- public ByteBuffer bufferForName() { +- return name; +- } +- +- public SuperColumn setName(byte[] name) { +- setName(name == null ? (ByteBuffer)null : ByteBuffer.wrap(name)); +- return this; +- } +- +- public SuperColumn setName(ByteBuffer name) { +- this.name = name; +- return this; +- } +- +- public void unsetName() { +- this.name = null; +- } +- +- /** Returns true if field name is set (has been assigned a value) and false otherwise */ +- public boolean isSetName() { +- return this.name != null; +- } +- +- public void setNameIsSet(boolean value) { +- if (!value) { +- this.name = null; +- } +- } +- +- public int getColumnsSize() { +- return (this.columns == null) ? 0 : this.columns.size(); +- } +- +- public java.util.Iterator getColumnsIterator() { +- return (this.columns == null) ? 
null : this.columns.iterator(); +- } +- +- public void addToColumns(Column elem) { +- if (this.columns == null) { +- this.columns = new ArrayList(); +- } +- this.columns.add(elem); +- } +- +- public List getColumns() { +- return this.columns; +- } +- +- public SuperColumn setColumns(List columns) { +- this.columns = columns; +- return this; +- } +- +- public void unsetColumns() { +- this.columns = null; +- } +- +- /** Returns true if field columns is set (has been assigned a value) and false otherwise */ +- public boolean isSetColumns() { +- return this.columns != null; +- } +- +- public void setColumnsIsSet(boolean value) { +- if (!value) { +- this.columns = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case NAME: +- if (value == null) { +- unsetName(); +- } else { +- setName((ByteBuffer)value); +- } +- break; +- +- case COLUMNS: +- if (value == null) { +- unsetColumns(); +- } else { +- setColumns((List)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case NAME: +- return getName(); +- +- case COLUMNS: +- return getColumns(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case NAME: +- return isSetName(); +- case COLUMNS: +- return isSetColumns(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof SuperColumn) +- return this.equals((SuperColumn)that); +- return false; +- } +- +- public boolean equals(SuperColumn that) { +- if (that == null) +- return false; +- +- boolean this_present_name = true && this.isSetName(); +- boolean that_present_name = true && that.isSetName(); +- if 
(this_present_name || that_present_name) { +- if (!(this_present_name && that_present_name)) +- return false; +- if (!this.name.equals(that.name)) +- return false; +- } +- +- boolean this_present_columns = true && this.isSetColumns(); +- boolean that_present_columns = true && that.isSetColumns(); +- if (this_present_columns || that_present_columns) { +- if (!(this_present_columns && that_present_columns)) +- return false; +- if (!this.columns.equals(that.columns)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_name = true && (isSetName()); +- builder.append(present_name); +- if (present_name) +- builder.append(name); +- +- boolean present_columns = true && (isSetColumns()); +- builder.append(present_columns); +- if (present_columns) +- builder.append(columns); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(SuperColumn other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetName()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetColumns()).compareTo(other.isSetColumns()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetColumns()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.columns, other.columns); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { 
+- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("SuperColumn("); +- boolean first = true; +- +- sb.append("name:"); +- if (this.name == null) { +- sb.append("null"); +- } else { +- org.apache.thrift.TBaseHelper.toString(this.name, sb); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("columns:"); +- if (this.columns == null) { +- sb.append("null"); +- } else { +- sb.append(this.columns); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (name == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'name' was not present! Struct: " + toString()); +- } +- if (columns == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'columns' was not present! 
Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class SuperColumnStandardSchemeFactory implements SchemeFactory { +- public SuperColumnStandardScheme getScheme() { +- return new SuperColumnStandardScheme(); +- } +- } +- +- private static class SuperColumnStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, SuperColumn struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // NAME +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.name = iprot.readBinary(); +- struct.setNameIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // COLUMNS +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list0 = iprot.readListBegin(); +- struct.columns = new ArrayList(_list0.size); +- for (int _i1 = 0; _i1 < _list0.size; ++_i1) +- { +- Column _elem2; +- _elem2 = new Column(); +- _elem2.read(iprot); +- struct.columns.add(_elem2); +- } +- 
iprot.readListEnd(); +- } +- struct.setColumnsIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, SuperColumn struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.name != null) { +- oprot.writeFieldBegin(NAME_FIELD_DESC); +- oprot.writeBinary(struct.name); +- oprot.writeFieldEnd(); +- } +- if (struct.columns != null) { +- oprot.writeFieldBegin(COLUMNS_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.columns.size())); +- for (Column _iter3 : struct.columns) +- { +- _iter3.write(oprot); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class SuperColumnTupleSchemeFactory implements SchemeFactory { +- public SuperColumnTupleScheme getScheme() { +- return new SuperColumnTupleScheme(); +- } +- } +- +- private static class SuperColumnTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, SuperColumn struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeBinary(struct.name); +- { +- oprot.writeI32(struct.columns.size()); +- for (Column _iter4 : struct.columns) +- { +- _iter4.write(oprot); +- } +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, SuperColumn struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.name = 
iprot.readBinary(); +- struct.setNameIsSet(true); +- { +- org.apache.thrift.protocol.TList _list5 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); +- struct.columns = new ArrayList(_list5.size); +- for (int _i6 = 0; _i6 < _list5.size; ++_i6) +- { +- Column _elem7; +- _elem7 = new Column(); +- _elem7.read(iprot); +- struct.columns.add(_elem7); +- } +- } +- struct.setColumnsIsSet(true); +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/TimedOutException.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/TimedOutException.java +deleted file mode 100644 +index 2dafe85..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/TimedOutException.java ++++ /dev/null +@@ -1,671 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. 
+- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * RPC timeout was exceeded. either a node failed mid-operation, or load was too high, or the requested op was too large. 
+- */ +-public class TimedOutException extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TimedOutException"); +- +- private static final org.apache.thrift.protocol.TField ACKNOWLEDGED_BY_FIELD_DESC = new org.apache.thrift.protocol.TField("acknowledged_by", org.apache.thrift.protocol.TType.I32, (short)1); +- private static final org.apache.thrift.protocol.TField ACKNOWLEDGED_BY_BATCHLOG_FIELD_DESC = new org.apache.thrift.protocol.TField("acknowledged_by_batchlog", org.apache.thrift.protocol.TType.BOOL, (short)2); +- private static final org.apache.thrift.protocol.TField PAXOS_IN_PROGRESS_FIELD_DESC = new org.apache.thrift.protocol.TField("paxos_in_progress", org.apache.thrift.protocol.TType.BOOL, (short)3); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new TimedOutExceptionStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new TimedOutExceptionTupleSchemeFactory()); +- } +- +- /** +- * if a write operation was acknowledged by some replicas but not by enough to +- * satisfy the required ConsistencyLevel, the number of successful +- * replies will be given here. In case of atomic_batch_mutate method this field +- * will be set to -1 if the batch was written to the batchlog and to 0 if it wasn't. +- */ +- public int acknowledged_by; // optional +- /** +- * in case of atomic_batch_mutate method this field tells if the batch +- * was written to the batchlog. 
+- */ +- public boolean acknowledged_by_batchlog; // optional +- /** +- * for the CAS method, this field tells if we timed out during the paxos +- * protocol, as opposed to during the commit of our update +- */ +- public boolean paxos_in_progress; // optional +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- /** +- * if a write operation was acknowledged by some replicas but not by enough to +- * satisfy the required ConsistencyLevel, the number of successful +- * replies will be given here. In case of atomic_batch_mutate method this field +- * will be set to -1 if the batch was written to the batchlog and to 0 if it wasn't. +- */ +- ACKNOWLEDGED_BY((short)1, "acknowledged_by"), +- /** +- * in case of atomic_batch_mutate method this field tells if the batch +- * was written to the batchlog. +- */ +- ACKNOWLEDGED_BY_BATCHLOG((short)2, "acknowledged_by_batchlog"), +- /** +- * for the CAS method, this field tells if we timed out during the paxos +- * protocol, as opposed to during the commit of our update +- */ +- PAXOS_IN_PROGRESS((short)3, "paxos_in_progress"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // ACKNOWLEDGED_BY +- return ACKNOWLEDGED_BY; +- case 2: // ACKNOWLEDGED_BY_BATCHLOG +- return ACKNOWLEDGED_BY_BATCHLOG; +- case 3: // PAXOS_IN_PROGRESS +- return PAXOS_IN_PROGRESS; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. 
+- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private static final int __ACKNOWLEDGED_BY_ISSET_ID = 0; +- private static final int __ACKNOWLEDGED_BY_BATCHLOG_ISSET_ID = 1; +- private static final int __PAXOS_IN_PROGRESS_ISSET_ID = 2; +- private byte __isset_bitfield = 0; +- private _Fields optionals[] = {_Fields.ACKNOWLEDGED_BY,_Fields.ACKNOWLEDGED_BY_BATCHLOG,_Fields.PAXOS_IN_PROGRESS}; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.ACKNOWLEDGED_BY, new org.apache.thrift.meta_data.FieldMetaData("acknowledged_by", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); +- tmpMap.put(_Fields.ACKNOWLEDGED_BY_BATCHLOG, new org.apache.thrift.meta_data.FieldMetaData("acknowledged_by_batchlog", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); +- tmpMap.put(_Fields.PAXOS_IN_PROGRESS, new org.apache.thrift.meta_data.FieldMetaData("paxos_in_progress", 
org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TimedOutException.class, metaDataMap); +- } +- +- public TimedOutException() { +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public TimedOutException(TimedOutException other) { +- __isset_bitfield = other.__isset_bitfield; +- this.acknowledged_by = other.acknowledged_by; +- this.acknowledged_by_batchlog = other.acknowledged_by_batchlog; +- this.paxos_in_progress = other.paxos_in_progress; +- } +- +- public TimedOutException deepCopy() { +- return new TimedOutException(this); +- } +- +- @Override +- public void clear() { +- setAcknowledged_byIsSet(false); +- this.acknowledged_by = 0; +- setAcknowledged_by_batchlogIsSet(false); +- this.acknowledged_by_batchlog = false; +- setPaxos_in_progressIsSet(false); +- this.paxos_in_progress = false; +- } +- +- /** +- * if a write operation was acknowledged by some replicas but not by enough to +- * satisfy the required ConsistencyLevel, the number of successful +- * replies will be given here. In case of atomic_batch_mutate method this field +- * will be set to -1 if the batch was written to the batchlog and to 0 if it wasn't. +- */ +- public int getAcknowledged_by() { +- return this.acknowledged_by; +- } +- +- /** +- * if a write operation was acknowledged by some replicas but not by enough to +- * satisfy the required ConsistencyLevel, the number of successful +- * replies will be given here. In case of atomic_batch_mutate method this field +- * will be set to -1 if the batch was written to the batchlog and to 0 if it wasn't. 
+- */ +- public TimedOutException setAcknowledged_by(int acknowledged_by) { +- this.acknowledged_by = acknowledged_by; +- setAcknowledged_byIsSet(true); +- return this; +- } +- +- public void unsetAcknowledged_by() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ACKNOWLEDGED_BY_ISSET_ID); +- } +- +- /** Returns true if field acknowledged_by is set (has been assigned a value) and false otherwise */ +- public boolean isSetAcknowledged_by() { +- return EncodingUtils.testBit(__isset_bitfield, __ACKNOWLEDGED_BY_ISSET_ID); +- } +- +- public void setAcknowledged_byIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ACKNOWLEDGED_BY_ISSET_ID, value); +- } +- +- /** +- * in case of atomic_batch_mutate method this field tells if the batch +- * was written to the batchlog. +- */ +- public boolean isAcknowledged_by_batchlog() { +- return this.acknowledged_by_batchlog; +- } +- +- /** +- * in case of atomic_batch_mutate method this field tells if the batch +- * was written to the batchlog. 
+- */ +- public TimedOutException setAcknowledged_by_batchlog(boolean acknowledged_by_batchlog) { +- this.acknowledged_by_batchlog = acknowledged_by_batchlog; +- setAcknowledged_by_batchlogIsSet(true); +- return this; +- } +- +- public void unsetAcknowledged_by_batchlog() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ACKNOWLEDGED_BY_BATCHLOG_ISSET_ID); +- } +- +- /** Returns true if field acknowledged_by_batchlog is set (has been assigned a value) and false otherwise */ +- public boolean isSetAcknowledged_by_batchlog() { +- return EncodingUtils.testBit(__isset_bitfield, __ACKNOWLEDGED_BY_BATCHLOG_ISSET_ID); +- } +- +- public void setAcknowledged_by_batchlogIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ACKNOWLEDGED_BY_BATCHLOG_ISSET_ID, value); +- } +- +- /** +- * for the CAS method, this field tells if we timed out during the paxos +- * protocol, as opposed to during the commit of our update +- */ +- public boolean isPaxos_in_progress() { +- return this.paxos_in_progress; +- } +- +- /** +- * for the CAS method, this field tells if we timed out during the paxos +- * protocol, as opposed to during the commit of our update +- */ +- public TimedOutException setPaxos_in_progress(boolean paxos_in_progress) { +- this.paxos_in_progress = paxos_in_progress; +- setPaxos_in_progressIsSet(true); +- return this; +- } +- +- public void unsetPaxos_in_progress() { +- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __PAXOS_IN_PROGRESS_ISSET_ID); +- } +- +- /** Returns true if field paxos_in_progress is set (has been assigned a value) and false otherwise */ +- public boolean isSetPaxos_in_progress() { +- return EncodingUtils.testBit(__isset_bitfield, __PAXOS_IN_PROGRESS_ISSET_ID); +- } +- +- public void setPaxos_in_progressIsSet(boolean value) { +- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __PAXOS_IN_PROGRESS_ISSET_ID, value); +- } +- +- public void setFieldValue(_Fields field, Object value) { 
+- switch (field) { +- case ACKNOWLEDGED_BY: +- if (value == null) { +- unsetAcknowledged_by(); +- } else { +- setAcknowledged_by((Integer)value); +- } +- break; +- +- case ACKNOWLEDGED_BY_BATCHLOG: +- if (value == null) { +- unsetAcknowledged_by_batchlog(); +- } else { +- setAcknowledged_by_batchlog((Boolean)value); +- } +- break; +- +- case PAXOS_IN_PROGRESS: +- if (value == null) { +- unsetPaxos_in_progress(); +- } else { +- setPaxos_in_progress((Boolean)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case ACKNOWLEDGED_BY: +- return Integer.valueOf(getAcknowledged_by()); +- +- case ACKNOWLEDGED_BY_BATCHLOG: +- return Boolean.valueOf(isAcknowledged_by_batchlog()); +- +- case PAXOS_IN_PROGRESS: +- return Boolean.valueOf(isPaxos_in_progress()); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case ACKNOWLEDGED_BY: +- return isSetAcknowledged_by(); +- case ACKNOWLEDGED_BY_BATCHLOG: +- return isSetAcknowledged_by_batchlog(); +- case PAXOS_IN_PROGRESS: +- return isSetPaxos_in_progress(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof TimedOutException) +- return this.equals((TimedOutException)that); +- return false; +- } +- +- public boolean equals(TimedOutException that) { +- if (that == null) +- return false; +- +- boolean this_present_acknowledged_by = true && this.isSetAcknowledged_by(); +- boolean that_present_acknowledged_by = true && that.isSetAcknowledged_by(); +- if (this_present_acknowledged_by || that_present_acknowledged_by) { +- if (!(this_present_acknowledged_by && that_present_acknowledged_by)) +- return false; +- if 
(this.acknowledged_by != that.acknowledged_by) +- return false; +- } +- +- boolean this_present_acknowledged_by_batchlog = true && this.isSetAcknowledged_by_batchlog(); +- boolean that_present_acknowledged_by_batchlog = true && that.isSetAcknowledged_by_batchlog(); +- if (this_present_acknowledged_by_batchlog || that_present_acknowledged_by_batchlog) { +- if (!(this_present_acknowledged_by_batchlog && that_present_acknowledged_by_batchlog)) +- return false; +- if (this.acknowledged_by_batchlog != that.acknowledged_by_batchlog) +- return false; +- } +- +- boolean this_present_paxos_in_progress = true && this.isSetPaxos_in_progress(); +- boolean that_present_paxos_in_progress = true && that.isSetPaxos_in_progress(); +- if (this_present_paxos_in_progress || that_present_paxos_in_progress) { +- if (!(this_present_paxos_in_progress && that_present_paxos_in_progress)) +- return false; +- if (this.paxos_in_progress != that.paxos_in_progress) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_acknowledged_by = true && (isSetAcknowledged_by()); +- builder.append(present_acknowledged_by); +- if (present_acknowledged_by) +- builder.append(acknowledged_by); +- +- boolean present_acknowledged_by_batchlog = true && (isSetAcknowledged_by_batchlog()); +- builder.append(present_acknowledged_by_batchlog); +- if (present_acknowledged_by_batchlog) +- builder.append(acknowledged_by_batchlog); +- +- boolean present_paxos_in_progress = true && (isSetPaxos_in_progress()); +- builder.append(present_paxos_in_progress); +- if (present_paxos_in_progress) +- builder.append(paxos_in_progress); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(TimedOutException other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = 
Boolean.valueOf(isSetAcknowledged_by()).compareTo(other.isSetAcknowledged_by()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetAcknowledged_by()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.acknowledged_by, other.acknowledged_by); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetAcknowledged_by_batchlog()).compareTo(other.isSetAcknowledged_by_batchlog()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetAcknowledged_by_batchlog()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.acknowledged_by_batchlog, other.acknowledged_by_batchlog); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetPaxos_in_progress()).compareTo(other.isSetPaxos_in_progress()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetPaxos_in_progress()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.paxos_in_progress, other.paxos_in_progress); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("TimedOutException("); +- boolean first = true; +- +- if (isSetAcknowledged_by()) { +- sb.append("acknowledged_by:"); +- sb.append(this.acknowledged_by); +- first = false; +- } +- if (isSetAcknowledged_by_batchlog()) { +- if (!first) sb.append(", "); +- sb.append("acknowledged_by_batchlog:"); +- 
sb.append(this.acknowledged_by_batchlog); +- first = false; +- } +- if (isSetPaxos_in_progress()) { +- if (!first) sb.append(", "); +- sb.append("paxos_in_progress:"); +- sb.append(this.paxos_in_progress); +- first = false; +- } +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. +- __isset_bitfield = 0; +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class TimedOutExceptionStandardSchemeFactory implements SchemeFactory { +- public TimedOutExceptionStandardScheme getScheme() { +- return new TimedOutExceptionStandardScheme(); +- } +- } +- +- private static class TimedOutExceptionStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, TimedOutException struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // ACKNOWLEDGED_BY +- if (schemeField.type == org.apache.thrift.protocol.TType.I32) { +- 
struct.acknowledged_by = iprot.readI32(); +- struct.setAcknowledged_byIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // ACKNOWLEDGED_BY_BATCHLOG +- if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { +- struct.acknowledged_by_batchlog = iprot.readBool(); +- struct.setAcknowledged_by_batchlogIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // PAXOS_IN_PROGRESS +- if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { +- struct.paxos_in_progress = iprot.readBool(); +- struct.setPaxos_in_progressIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, TimedOutException struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.isSetAcknowledged_by()) { +- oprot.writeFieldBegin(ACKNOWLEDGED_BY_FIELD_DESC); +- oprot.writeI32(struct.acknowledged_by); +- oprot.writeFieldEnd(); +- } +- if (struct.isSetAcknowledged_by_batchlog()) { +- oprot.writeFieldBegin(ACKNOWLEDGED_BY_BATCHLOG_FIELD_DESC); +- oprot.writeBool(struct.acknowledged_by_batchlog); +- oprot.writeFieldEnd(); +- } +- if (struct.isSetPaxos_in_progress()) { +- oprot.writeFieldBegin(PAXOS_IN_PROGRESS_FIELD_DESC); +- oprot.writeBool(struct.paxos_in_progress); +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class TimedOutExceptionTupleSchemeFactory implements SchemeFactory { +- public TimedOutExceptionTupleScheme 
getScheme() { +- return new TimedOutExceptionTupleScheme(); +- } +- } +- +- private static class TimedOutExceptionTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, TimedOutException struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- BitSet optionals = new BitSet(); +- if (struct.isSetAcknowledged_by()) { +- optionals.set(0); +- } +- if (struct.isSetAcknowledged_by_batchlog()) { +- optionals.set(1); +- } +- if (struct.isSetPaxos_in_progress()) { +- optionals.set(2); +- } +- oprot.writeBitSet(optionals, 3); +- if (struct.isSetAcknowledged_by()) { +- oprot.writeI32(struct.acknowledged_by); +- } +- if (struct.isSetAcknowledged_by_batchlog()) { +- oprot.writeBool(struct.acknowledged_by_batchlog); +- } +- if (struct.isSetPaxos_in_progress()) { +- oprot.writeBool(struct.paxos_in_progress); +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, TimedOutException struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- BitSet incoming = iprot.readBitSet(3); +- if (incoming.get(0)) { +- struct.acknowledged_by = iprot.readI32(); +- struct.setAcknowledged_byIsSet(true); +- } +- if (incoming.get(1)) { +- struct.acknowledged_by_batchlog = iprot.readBool(); +- struct.setAcknowledged_by_batchlogIsSet(true); +- } +- if (incoming.get(2)) { +- struct.paxos_in_progress = iprot.readBool(); +- struct.setPaxos_in_progressIsSet(true); +- } +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/TokenRange.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/TokenRange.java +deleted file mode 100644 +index 37d0f77..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/TokenRange.java ++++ /dev/null +@@ -1,990 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * 
@generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. +- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * A TokenRange describes part of the Cassandra ring, it is a mapping from a range to +- * endpoints responsible for that range. 
+- * @param start_token The first token in the range +- * @param end_token The last token in the range +- * @param endpoints The endpoints responsible for the range (listed by their configured listen_address) +- * @param rpc_endpoints The endpoints responsible for the range (listed by their configured rpc_address) +- */ +-public class TokenRange implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TokenRange"); +- +- private static final org.apache.thrift.protocol.TField START_TOKEN_FIELD_DESC = new org.apache.thrift.protocol.TField("start_token", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField END_TOKEN_FIELD_DESC = new org.apache.thrift.protocol.TField("end_token", org.apache.thrift.protocol.TType.STRING, (short)2); +- private static final org.apache.thrift.protocol.TField ENDPOINTS_FIELD_DESC = new org.apache.thrift.protocol.TField("endpoints", org.apache.thrift.protocol.TType.LIST, (short)3); +- private static final org.apache.thrift.protocol.TField RPC_ENDPOINTS_FIELD_DESC = new org.apache.thrift.protocol.TField("rpc_endpoints", org.apache.thrift.protocol.TType.LIST, (short)4); +- private static final org.apache.thrift.protocol.TField ENDPOINT_DETAILS_FIELD_DESC = new org.apache.thrift.protocol.TField("endpoint_details", org.apache.thrift.protocol.TType.LIST, (short)5); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new TokenRangeStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new TokenRangeTupleSchemeFactory()); +- } +- +- public String start_token; // required +- public String end_token; // required +- public List endpoints; // required +- public List rpc_endpoints; // optional +- public List endpoint_details; // optional +- +- /** The set of fields this 
struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- START_TOKEN((short)1, "start_token"), +- END_TOKEN((short)2, "end_token"), +- ENDPOINTS((short)3, "endpoints"), +- RPC_ENDPOINTS((short)4, "rpc_endpoints"), +- ENDPOINT_DETAILS((short)5, "endpoint_details"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // START_TOKEN +- return START_TOKEN; +- case 2: // END_TOKEN +- return END_TOKEN; +- case 3: // ENDPOINTS +- return ENDPOINTS; +- case 4: // RPC_ENDPOINTS +- return RPC_ENDPOINTS; +- case 5: // ENDPOINT_DETAILS +- return ENDPOINT_DETAILS; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- private _Fields optionals[] = {_Fields.RPC_ENDPOINTS,_Fields.ENDPOINT_DETAILS}; +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.START_TOKEN, new org.apache.thrift.meta_data.FieldMetaData("start_token", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.END_TOKEN, new org.apache.thrift.meta_data.FieldMetaData("end_token", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.ENDPOINTS, new org.apache.thrift.meta_data.FieldMetaData("endpoints", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); +- tmpMap.put(_Fields.RPC_ENDPOINTS, new org.apache.thrift.meta_data.FieldMetaData("rpc_endpoints", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); +- tmpMap.put(_Fields.ENDPOINT_DETAILS, new 
org.apache.thrift.meta_data.FieldMetaData("endpoint_details", org.apache.thrift.TFieldRequirementType.OPTIONAL, +- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, +- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EndpointDetails.class)))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TokenRange.class, metaDataMap); +- } +- +- public TokenRange() { +- } +- +- public TokenRange( +- String start_token, +- String end_token, +- List endpoints) +- { +- this(); +- this.start_token = start_token; +- this.end_token = end_token; +- this.endpoints = endpoints; +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public TokenRange(TokenRange other) { +- if (other.isSetStart_token()) { +- this.start_token = other.start_token; +- } +- if (other.isSetEnd_token()) { +- this.end_token = other.end_token; +- } +- if (other.isSetEndpoints()) { +- List __this__endpoints = new ArrayList(other.endpoints); +- this.endpoints = __this__endpoints; +- } +- if (other.isSetRpc_endpoints()) { +- List __this__rpc_endpoints = new ArrayList(other.rpc_endpoints); +- this.rpc_endpoints = __this__rpc_endpoints; +- } +- if (other.isSetEndpoint_details()) { +- List __this__endpoint_details = new ArrayList(other.endpoint_details.size()); +- for (EndpointDetails other_element : other.endpoint_details) { +- __this__endpoint_details.add(new EndpointDetails(other_element)); +- } +- this.endpoint_details = __this__endpoint_details; +- } +- } +- +- public TokenRange deepCopy() { +- return new TokenRange(this); +- } +- +- @Override +- public void clear() { +- this.start_token = null; +- this.end_token = null; +- this.endpoints = null; +- this.rpc_endpoints = null; +- this.endpoint_details = null; +- } +- +- public String getStart_token() { +- return this.start_token; +- } +- +- public TokenRange setStart_token(String start_token) { +- this.start_token = 
start_token; +- return this; +- } +- +- public void unsetStart_token() { +- this.start_token = null; +- } +- +- /** Returns true if field start_token is set (has been assigned a value) and false otherwise */ +- public boolean isSetStart_token() { +- return this.start_token != null; +- } +- +- public void setStart_tokenIsSet(boolean value) { +- if (!value) { +- this.start_token = null; +- } +- } +- +- public String getEnd_token() { +- return this.end_token; +- } +- +- public TokenRange setEnd_token(String end_token) { +- this.end_token = end_token; +- return this; +- } +- +- public void unsetEnd_token() { +- this.end_token = null; +- } +- +- /** Returns true if field end_token is set (has been assigned a value) and false otherwise */ +- public boolean isSetEnd_token() { +- return this.end_token != null; +- } +- +- public void setEnd_tokenIsSet(boolean value) { +- if (!value) { +- this.end_token = null; +- } +- } +- +- public int getEndpointsSize() { +- return (this.endpoints == null) ? 0 : this.endpoints.size(); +- } +- +- public java.util.Iterator getEndpointsIterator() { +- return (this.endpoints == null) ? null : this.endpoints.iterator(); +- } +- +- public void addToEndpoints(String elem) { +- if (this.endpoints == null) { +- this.endpoints = new ArrayList(); +- } +- this.endpoints.add(elem); +- } +- +- public List getEndpoints() { +- return this.endpoints; +- } +- +- public TokenRange setEndpoints(List endpoints) { +- this.endpoints = endpoints; +- return this; +- } +- +- public void unsetEndpoints() { +- this.endpoints = null; +- } +- +- /** Returns true if field endpoints is set (has been assigned a value) and false otherwise */ +- public boolean isSetEndpoints() { +- return this.endpoints != null; +- } +- +- public void setEndpointsIsSet(boolean value) { +- if (!value) { +- this.endpoints = null; +- } +- } +- +- public int getRpc_endpointsSize() { +- return (this.rpc_endpoints == null) ? 
0 : this.rpc_endpoints.size(); +- } +- +- public java.util.Iterator getRpc_endpointsIterator() { +- return (this.rpc_endpoints == null) ? null : this.rpc_endpoints.iterator(); +- } +- +- public void addToRpc_endpoints(String elem) { +- if (this.rpc_endpoints == null) { +- this.rpc_endpoints = new ArrayList(); +- } +- this.rpc_endpoints.add(elem); +- } +- +- public List getRpc_endpoints() { +- return this.rpc_endpoints; +- } +- +- public TokenRange setRpc_endpoints(List rpc_endpoints) { +- this.rpc_endpoints = rpc_endpoints; +- return this; +- } +- +- public void unsetRpc_endpoints() { +- this.rpc_endpoints = null; +- } +- +- /** Returns true if field rpc_endpoints is set (has been assigned a value) and false otherwise */ +- public boolean isSetRpc_endpoints() { +- return this.rpc_endpoints != null; +- } +- +- public void setRpc_endpointsIsSet(boolean value) { +- if (!value) { +- this.rpc_endpoints = null; +- } +- } +- +- public int getEndpoint_detailsSize() { +- return (this.endpoint_details == null) ? 0 : this.endpoint_details.size(); +- } +- +- public java.util.Iterator getEndpoint_detailsIterator() { +- return (this.endpoint_details == null) ? 
null : this.endpoint_details.iterator(); +- } +- +- public void addToEndpoint_details(EndpointDetails elem) { +- if (this.endpoint_details == null) { +- this.endpoint_details = new ArrayList(); +- } +- this.endpoint_details.add(elem); +- } +- +- public List getEndpoint_details() { +- return this.endpoint_details; +- } +- +- public TokenRange setEndpoint_details(List endpoint_details) { +- this.endpoint_details = endpoint_details; +- return this; +- } +- +- public void unsetEndpoint_details() { +- this.endpoint_details = null; +- } +- +- /** Returns true if field endpoint_details is set (has been assigned a value) and false otherwise */ +- public boolean isSetEndpoint_details() { +- return this.endpoint_details != null; +- } +- +- public void setEndpoint_detailsIsSet(boolean value) { +- if (!value) { +- this.endpoint_details = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case START_TOKEN: +- if (value == null) { +- unsetStart_token(); +- } else { +- setStart_token((String)value); +- } +- break; +- +- case END_TOKEN: +- if (value == null) { +- unsetEnd_token(); +- } else { +- setEnd_token((String)value); +- } +- break; +- +- case ENDPOINTS: +- if (value == null) { +- unsetEndpoints(); +- } else { +- setEndpoints((List)value); +- } +- break; +- +- case RPC_ENDPOINTS: +- if (value == null) { +- unsetRpc_endpoints(); +- } else { +- setRpc_endpoints((List)value); +- } +- break; +- +- case ENDPOINT_DETAILS: +- if (value == null) { +- unsetEndpoint_details(); +- } else { +- setEndpoint_details((List)value); +- } +- break; +- +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- case START_TOKEN: +- return getStart_token(); +- +- case END_TOKEN: +- return getEnd_token(); +- +- case ENDPOINTS: +- return getEndpoints(); +- +- case RPC_ENDPOINTS: +- return getRpc_endpoints(); +- +- case ENDPOINT_DETAILS: +- return getEndpoint_details(); +- +- } +- throw new IllegalStateException(); +- } +- 
+- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case START_TOKEN: +- return isSetStart_token(); +- case END_TOKEN: +- return isSetEnd_token(); +- case ENDPOINTS: +- return isSetEndpoints(); +- case RPC_ENDPOINTS: +- return isSetRpc_endpoints(); +- case ENDPOINT_DETAILS: +- return isSetEndpoint_details(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof TokenRange) +- return this.equals((TokenRange)that); +- return false; +- } +- +- public boolean equals(TokenRange that) { +- if (that == null) +- return false; +- +- boolean this_present_start_token = true && this.isSetStart_token(); +- boolean that_present_start_token = true && that.isSetStart_token(); +- if (this_present_start_token || that_present_start_token) { +- if (!(this_present_start_token && that_present_start_token)) +- return false; +- if (!this.start_token.equals(that.start_token)) +- return false; +- } +- +- boolean this_present_end_token = true && this.isSetEnd_token(); +- boolean that_present_end_token = true && that.isSetEnd_token(); +- if (this_present_end_token || that_present_end_token) { +- if (!(this_present_end_token && that_present_end_token)) +- return false; +- if (!this.end_token.equals(that.end_token)) +- return false; +- } +- +- boolean this_present_endpoints = true && this.isSetEndpoints(); +- boolean that_present_endpoints = true && that.isSetEndpoints(); +- if (this_present_endpoints || that_present_endpoints) { +- if (!(this_present_endpoints && that_present_endpoints)) +- return false; +- if (!this.endpoints.equals(that.endpoints)) +- return false; +- } +- +- boolean this_present_rpc_endpoints = true && this.isSetRpc_endpoints(); +- boolean that_present_rpc_endpoints = 
true && that.isSetRpc_endpoints(); +- if (this_present_rpc_endpoints || that_present_rpc_endpoints) { +- if (!(this_present_rpc_endpoints && that_present_rpc_endpoints)) +- return false; +- if (!this.rpc_endpoints.equals(that.rpc_endpoints)) +- return false; +- } +- +- boolean this_present_endpoint_details = true && this.isSetEndpoint_details(); +- boolean that_present_endpoint_details = true && that.isSetEndpoint_details(); +- if (this_present_endpoint_details || that_present_endpoint_details) { +- if (!(this_present_endpoint_details && that_present_endpoint_details)) +- return false; +- if (!this.endpoint_details.equals(that.endpoint_details)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_start_token = true && (isSetStart_token()); +- builder.append(present_start_token); +- if (present_start_token) +- builder.append(start_token); +- +- boolean present_end_token = true && (isSetEnd_token()); +- builder.append(present_end_token); +- if (present_end_token) +- builder.append(end_token); +- +- boolean present_endpoints = true && (isSetEndpoints()); +- builder.append(present_endpoints); +- if (present_endpoints) +- builder.append(endpoints); +- +- boolean present_rpc_endpoints = true && (isSetRpc_endpoints()); +- builder.append(present_rpc_endpoints); +- if (present_rpc_endpoints) +- builder.append(rpc_endpoints); +- +- boolean present_endpoint_details = true && (isSetEndpoint_details()); +- builder.append(present_endpoint_details); +- if (present_endpoint_details) +- builder.append(endpoint_details); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(TokenRange other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetStart_token()).compareTo(other.isSetStart_token()); +- if 
(lastComparison != 0) { +- return lastComparison; +- } +- if (isSetStart_token()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.start_token, other.start_token); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetEnd_token()).compareTo(other.isSetEnd_token()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetEnd_token()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.end_token, other.end_token); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetEndpoints()).compareTo(other.isSetEndpoints()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetEndpoints()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.endpoints, other.endpoints); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetRpc_endpoints()).compareTo(other.isSetRpc_endpoints()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetRpc_endpoints()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rpc_endpoints, other.rpc_endpoints); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetEndpoint_details()).compareTo(other.isSetEndpoint_details()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetEndpoint_details()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.endpoint_details, other.endpoint_details); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol 
oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("TokenRange("); +- boolean first = true; +- +- sb.append("start_token:"); +- if (this.start_token == null) { +- sb.append("null"); +- } else { +- sb.append(this.start_token); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("end_token:"); +- if (this.end_token == null) { +- sb.append("null"); +- } else { +- sb.append(this.end_token); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("endpoints:"); +- if (this.endpoints == null) { +- sb.append("null"); +- } else { +- sb.append(this.endpoints); +- } +- first = false; +- if (isSetRpc_endpoints()) { +- if (!first) sb.append(", "); +- sb.append("rpc_endpoints:"); +- if (this.rpc_endpoints == null) { +- sb.append("null"); +- } else { +- sb.append(this.rpc_endpoints); +- } +- first = false; +- } +- if (isSetEndpoint_details()) { +- if (!first) sb.append(", "); +- sb.append("endpoint_details:"); +- if (this.endpoint_details == null) { +- sb.append("null"); +- } else { +- sb.append(this.endpoint_details); +- } +- first = false; +- } +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (start_token == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'start_token' was not present! Struct: " + toString()); +- } +- if (end_token == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'end_token' was not present! Struct: " + toString()); +- } +- if (endpoints == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'endpoints' was not present! 
Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class TokenRangeStandardSchemeFactory implements SchemeFactory { +- public TokenRangeStandardScheme getScheme() { +- return new TokenRangeStandardScheme(); +- } +- } +- +- private static class TokenRangeStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, TokenRange struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // START_TOKEN +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.start_token = iprot.readString(); +- struct.setStart_tokenIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // END_TOKEN +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.end_token = iprot.readString(); +- struct.setEnd_tokenIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 3: // ENDPOINTS +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { 
+- { +- org.apache.thrift.protocol.TList _list56 = iprot.readListBegin(); +- struct.endpoints = new ArrayList(_list56.size); +- for (int _i57 = 0; _i57 < _list56.size; ++_i57) +- { +- String _elem58; +- _elem58 = iprot.readString(); +- struct.endpoints.add(_elem58); +- } +- iprot.readListEnd(); +- } +- struct.setEndpointsIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 4: // RPC_ENDPOINTS +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list59 = iprot.readListBegin(); +- struct.rpc_endpoints = new ArrayList(_list59.size); +- for (int _i60 = 0; _i60 < _list59.size; ++_i60) +- { +- String _elem61; +- _elem61 = iprot.readString(); +- struct.rpc_endpoints.add(_elem61); +- } +- iprot.readListEnd(); +- } +- struct.setRpc_endpointsIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 5: // ENDPOINT_DETAILS +- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { +- { +- org.apache.thrift.protocol.TList _list62 = iprot.readListBegin(); +- struct.endpoint_details = new ArrayList(_list62.size); +- for (int _i63 = 0; _i63 < _list62.size; ++_i63) +- { +- EndpointDetails _elem64; +- _elem64 = new EndpointDetails(); +- _elem64.read(iprot); +- struct.endpoint_details.add(_elem64); +- } +- iprot.readListEnd(); +- } +- struct.setEndpoint_detailsIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, TokenRange struct) throws org.apache.thrift.TException { +- struct.validate(); 
+- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.start_token != null) { +- oprot.writeFieldBegin(START_TOKEN_FIELD_DESC); +- oprot.writeString(struct.start_token); +- oprot.writeFieldEnd(); +- } +- if (struct.end_token != null) { +- oprot.writeFieldBegin(END_TOKEN_FIELD_DESC); +- oprot.writeString(struct.end_token); +- oprot.writeFieldEnd(); +- } +- if (struct.endpoints != null) { +- oprot.writeFieldBegin(ENDPOINTS_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.endpoints.size())); +- for (String _iter65 : struct.endpoints) +- { +- oprot.writeString(_iter65); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- if (struct.rpc_endpoints != null) { +- if (struct.isSetRpc_endpoints()) { +- oprot.writeFieldBegin(RPC_ENDPOINTS_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.rpc_endpoints.size())); +- for (String _iter66 : struct.rpc_endpoints) +- { +- oprot.writeString(_iter66); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- } +- if (struct.endpoint_details != null) { +- if (struct.isSetEndpoint_details()) { +- oprot.writeFieldBegin(ENDPOINT_DETAILS_FIELD_DESC); +- { +- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.endpoint_details.size())); +- for (EndpointDetails _iter67 : struct.endpoint_details) +- { +- _iter67.write(oprot); +- } +- oprot.writeListEnd(); +- } +- oprot.writeFieldEnd(); +- } +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class TokenRangeTupleSchemeFactory implements SchemeFactory { +- public TokenRangeTupleScheme getScheme() { +- return new TokenRangeTupleScheme(); +- } +- } +- +- private static class TokenRangeTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, TokenRange struct) 
throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeString(struct.start_token); +- oprot.writeString(struct.end_token); +- { +- oprot.writeI32(struct.endpoints.size()); +- for (String _iter68 : struct.endpoints) +- { +- oprot.writeString(_iter68); +- } +- } +- BitSet optionals = new BitSet(); +- if (struct.isSetRpc_endpoints()) { +- optionals.set(0); +- } +- if (struct.isSetEndpoint_details()) { +- optionals.set(1); +- } +- oprot.writeBitSet(optionals, 2); +- if (struct.isSetRpc_endpoints()) { +- { +- oprot.writeI32(struct.rpc_endpoints.size()); +- for (String _iter69 : struct.rpc_endpoints) +- { +- oprot.writeString(_iter69); +- } +- } +- } +- if (struct.isSetEndpoint_details()) { +- { +- oprot.writeI32(struct.endpoint_details.size()); +- for (EndpointDetails _iter70 : struct.endpoint_details) +- { +- _iter70.write(oprot); +- } +- } +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, TokenRange struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.start_token = iprot.readString(); +- struct.setStart_tokenIsSet(true); +- struct.end_token = iprot.readString(); +- struct.setEnd_tokenIsSet(true); +- { +- org.apache.thrift.protocol.TList _list71 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); +- struct.endpoints = new ArrayList(_list71.size); +- for (int _i72 = 0; _i72 < _list71.size; ++_i72) +- { +- String _elem73; +- _elem73 = iprot.readString(); +- struct.endpoints.add(_elem73); +- } +- } +- struct.setEndpointsIsSet(true); +- BitSet incoming = iprot.readBitSet(2); +- if (incoming.get(0)) { +- { +- org.apache.thrift.protocol.TList _list74 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); +- struct.rpc_endpoints = new ArrayList(_list74.size); +- for (int _i75 = 0; _i75 < _list74.size; ++_i75) +- { +- String _elem76; +- _elem76 = 
iprot.readString(); +- struct.rpc_endpoints.add(_elem76); +- } +- } +- struct.setRpc_endpointsIsSet(true); +- } +- if (incoming.get(1)) { +- { +- org.apache.thrift.protocol.TList _list77 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); +- struct.endpoint_details = new ArrayList(_list77.size); +- for (int _i78 = 0; _i78 < _list77.size; ++_i78) +- { +- EndpointDetails _elem79; +- _elem79 = new EndpointDetails(); +- _elem79.read(iprot); +- struct.endpoint_details.add(_elem79); +- } +- } +- struct.setEndpoint_detailsIsSet(true); +- } +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/TriggerDef.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/TriggerDef.java +deleted file mode 100644 +index 32b0ac5..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/TriggerDef.java ++++ /dev/null +@@ -1,568 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. 
+- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * Describes a trigger. +- * `options` should include at least 'class' param. +- * Other options are not supported yet. 
+- */ +-public class TriggerDef implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TriggerDef"); +- +- private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1); +- private static final org.apache.thrift.protocol.TField OPTIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("options", org.apache.thrift.protocol.TType.MAP, (short)2); +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new TriggerDefStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new TriggerDefTupleSchemeFactory()); +- } +- +- public String name; // required +- public Map options; // required +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +- NAME((short)1, "name"), +- OPTIONS((short)2, "options"); +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- case 1: // NAME +- return NAME; +- case 2: // OPTIONS +- return OPTIONS; +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. 
+- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. +- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- +- // isset id assignments +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); +- tmpMap.put(_Fields.OPTIONS, new org.apache.thrift.meta_data.FieldMetaData("options", org.apache.thrift.TFieldRequirementType.REQUIRED, +- new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), +- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TriggerDef.class, metaDataMap); +- } +- +- public TriggerDef() { +- } +- +- public TriggerDef( +- String name, +- Map options) +- { +- this(); +- this.name = name; +- this.options = options; +- } +- +- /** +- * Performs a 
deep copy on other. +- */ +- public TriggerDef(TriggerDef other) { +- if (other.isSetName()) { +- this.name = other.name; +- } +- if (other.isSetOptions()) { +- Map __this__options = new HashMap(other.options); +- this.options = __this__options; +- } +- } +- +- public TriggerDef deepCopy() { +- return new TriggerDef(this); +- } +- +- @Override +- public void clear() { +- this.name = null; +- this.options = null; +- } +- +- public String getName() { +- return this.name; +- } +- +- public TriggerDef setName(String name) { +- this.name = name; +- return this; +- } +- +- public void unsetName() { +- this.name = null; +- } +- +- /** Returns true if field name is set (has been assigned a value) and false otherwise */ +- public boolean isSetName() { +- return this.name != null; +- } +- +- public void setNameIsSet(boolean value) { +- if (!value) { +- this.name = null; +- } +- } +- +- public int getOptionsSize() { +- return (this.options == null) ? 0 : this.options.size(); +- } +- +- public void putToOptions(String key, String val) { +- if (this.options == null) { +- this.options = new HashMap(); +- } +- this.options.put(key, val); +- } +- +- public Map getOptions() { +- return this.options; +- } +- +- public TriggerDef setOptions(Map options) { +- this.options = options; +- return this; +- } +- +- public void unsetOptions() { +- this.options = null; +- } +- +- /** Returns true if field options is set (has been assigned a value) and false otherwise */ +- public boolean isSetOptions() { +- return this.options != null; +- } +- +- public void setOptionsIsSet(boolean value) { +- if (!value) { +- this.options = null; +- } +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- case NAME: +- if (value == null) { +- unsetName(); +- } else { +- setName((String)value); +- } +- break; +- +- case OPTIONS: +- if (value == null) { +- unsetOptions(); +- } else { +- setOptions((Map)value); +- } +- break; +- +- } +- } +- +- public Object 
getFieldValue(_Fields field) { +- switch (field) { +- case NAME: +- return getName(); +- +- case OPTIONS: +- return getOptions(); +- +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- case NAME: +- return isSetName(); +- case OPTIONS: +- return isSetOptions(); +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof TriggerDef) +- return this.equals((TriggerDef)that); +- return false; +- } +- +- public boolean equals(TriggerDef that) { +- if (that == null) +- return false; +- +- boolean this_present_name = true && this.isSetName(); +- boolean that_present_name = true && that.isSetName(); +- if (this_present_name || that_present_name) { +- if (!(this_present_name && that_present_name)) +- return false; +- if (!this.name.equals(that.name)) +- return false; +- } +- +- boolean this_present_options = true && this.isSetOptions(); +- boolean that_present_options = true && that.isSetOptions(); +- if (this_present_options || that_present_options) { +- if (!(this_present_options && that_present_options)) +- return false; +- if (!this.options.equals(that.options)) +- return false; +- } +- +- return true; +- } +- +- @Override +- public int hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- boolean present_name = true && (isSetName()); +- builder.append(present_name); +- if (present_name) +- builder.append(name); +- +- boolean present_options = true && (isSetOptions()); +- builder.append(present_options); +- if (present_options) +- builder.append(options); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(TriggerDef other) { +- if (!getClass().equals(other.getClass())) { +- 
return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetName()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- lastComparison = Boolean.valueOf(isSetOptions()).compareTo(other.isSetOptions()); +- if (lastComparison != 0) { +- return lastComparison; +- } +- if (isSetOptions()) { +- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.options, other.options); +- if (lastComparison != 0) { +- return lastComparison; +- } +- } +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("TriggerDef("); +- boolean first = true; +- +- sb.append("name:"); +- if (this.name == null) { +- sb.append("null"); +- } else { +- sb.append(this.name); +- } +- first = false; +- if (!first) sb.append(", "); +- sb.append("options:"); +- if (this.options == null) { +- sb.append("null"); +- } else { +- sb.append(this.options); +- } +- first = false; +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- if (name == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'name' was not present! 
Struct: " + toString()); +- } +- if (options == null) { +- throw new org.apache.thrift.protocol.TProtocolException("Required field 'options' was not present! Struct: " + toString()); +- } +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class TriggerDefStandardSchemeFactory implements SchemeFactory { +- public TriggerDefStandardScheme getScheme() { +- return new TriggerDefStandardScheme(); +- } +- } +- +- private static class TriggerDefStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, TriggerDef struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- case 1: // NAME +- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { +- struct.name = iprot.readString(); +- struct.setNameIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- case 2: // OPTIONS +- if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { +- { +- org.apache.thrift.protocol.TMap _map100 = iprot.readMapBegin(); +- struct.options = new HashMap(2*_map100.size); +- for (int _i101 = 
0; _i101 < _map100.size; ++_i101) +- { +- String _key102; +- String _val103; +- _key102 = iprot.readString(); +- _val103 = iprot.readString(); +- struct.options.put(_key102, _val103); +- } +- iprot.readMapEnd(); +- } +- struct.setOptionsIsSet(true); +- } else { +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- break; +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, TriggerDef struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- if (struct.name != null) { +- oprot.writeFieldBegin(NAME_FIELD_DESC); +- oprot.writeString(struct.name); +- oprot.writeFieldEnd(); +- } +- if (struct.options != null) { +- oprot.writeFieldBegin(OPTIONS_FIELD_DESC); +- { +- oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.options.size())); +- for (Map.Entry _iter104 : struct.options.entrySet()) +- { +- oprot.writeString(_iter104.getKey()); +- oprot.writeString(_iter104.getValue()); +- } +- oprot.writeMapEnd(); +- } +- oprot.writeFieldEnd(); +- } +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class TriggerDefTupleSchemeFactory implements SchemeFactory { +- public TriggerDefTupleScheme getScheme() { +- return new TriggerDefTupleScheme(); +- } +- } +- +- private static class TriggerDefTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, TriggerDef struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- oprot.writeString(struct.name); +- { +- oprot.writeI32(struct.options.size()); 
+- for (Map.Entry _iter105 : struct.options.entrySet()) +- { +- oprot.writeString(_iter105.getKey()); +- oprot.writeString(_iter105.getValue()); +- } +- } +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, TriggerDef struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- struct.name = iprot.readString(); +- struct.setNameIsSet(true); +- { +- org.apache.thrift.protocol.TMap _map106 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); +- struct.options = new HashMap(2*_map106.size); +- for (int _i107 = 0; _i107 < _map106.size; ++_i107) +- { +- String _key108; +- String _val109; +- _key108 = iprot.readString(); +- _val109 = iprot.readString(); +- struct.options.put(_key108, _val109); +- } +- } +- struct.setOptionsIsSet(true); +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/UnavailableException.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/UnavailableException.java +deleted file mode 100644 +index 23bfeed..0000000 +--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/UnavailableException.java ++++ /dev/null +@@ -1,307 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. 
You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. +- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * Not all the replicas required could be created and/or read. 
+- */ +-public class UnavailableException extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { +- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("UnavailableException"); +- +- +- private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); +- static { +- schemes.put(StandardScheme.class, new UnavailableExceptionStandardSchemeFactory()); +- schemes.put(TupleScheme.class, new UnavailableExceptionTupleSchemeFactory()); +- } +- +- +- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ +- public enum _Fields implements org.apache.thrift.TFieldIdEnum { +-; +- +- private static final Map byName = new HashMap(); +- +- static { +- for (_Fields field : EnumSet.allOf(_Fields.class)) { +- byName.put(field.getFieldName(), field); +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, or null if its not found. +- */ +- public static _Fields findByThriftId(int fieldId) { +- switch(fieldId) { +- default: +- return null; +- } +- } +- +- /** +- * Find the _Fields constant that matches fieldId, throwing an exception +- * if it is not found. +- */ +- public static _Fields findByThriftIdOrThrow(int fieldId) { +- _Fields fields = findByThriftId(fieldId); +- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); +- return fields; +- } +- +- /** +- * Find the _Fields constant that matches name, or null if its not found. 
+- */ +- public static _Fields findByName(String name) { +- return byName.get(name); +- } +- +- private final short _thriftId; +- private final String _fieldName; +- +- _Fields(short thriftId, String fieldName) { +- _thriftId = thriftId; +- _fieldName = fieldName; +- } +- +- public short getThriftFieldId() { +- return _thriftId; +- } +- +- public String getFieldName() { +- return _fieldName; +- } +- } +- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; +- static { +- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); +- metaDataMap = Collections.unmodifiableMap(tmpMap); +- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(UnavailableException.class, metaDataMap); +- } +- +- public UnavailableException() { +- } +- +- /** +- * Performs a deep copy on other. +- */ +- public UnavailableException(UnavailableException other) { +- } +- +- public UnavailableException deepCopy() { +- return new UnavailableException(this); +- } +- +- @Override +- public void clear() { +- } +- +- public void setFieldValue(_Fields field, Object value) { +- switch (field) { +- } +- } +- +- public Object getFieldValue(_Fields field) { +- switch (field) { +- } +- throw new IllegalStateException(); +- } +- +- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ +- public boolean isSet(_Fields field) { +- if (field == null) { +- throw new IllegalArgumentException(); +- } +- +- switch (field) { +- } +- throw new IllegalStateException(); +- } +- +- @Override +- public boolean equals(Object that) { +- if (that == null) +- return false; +- if (that instanceof UnavailableException) +- return this.equals((UnavailableException)that); +- return false; +- } +- +- public boolean equals(UnavailableException that) { +- if (that == null) +- return false; +- +- return true; +- } +- +- @Override +- public int 
hashCode() { +- HashCodeBuilder builder = new HashCodeBuilder(); +- +- return builder.toHashCode(); +- } +- +- @Override +- public int compareTo(UnavailableException other) { +- if (!getClass().equals(other.getClass())) { +- return getClass().getName().compareTo(other.getClass().getName()); +- } +- +- int lastComparison = 0; +- +- return 0; +- } +- +- public _Fields fieldForId(int fieldId) { +- return _Fields.findByThriftId(fieldId); +- } +- +- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { +- schemes.get(iprot.getScheme()).getScheme().read(iprot, this); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { +- schemes.get(oprot.getScheme()).getScheme().write(oprot, this); +- } +- +- @Override +- public String toString() { +- StringBuilder sb = new StringBuilder("UnavailableException("); +- boolean first = true; +- +- sb.append(")"); +- return sb.toString(); +- } +- +- public void validate() throws org.apache.thrift.TException { +- // check for required fields +- // check for sub-struct validity +- } +- +- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { +- try { +- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { +- try { +- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); +- } catch (org.apache.thrift.TException te) { +- throw new java.io.IOException(te); +- } +- } +- +- private static class UnavailableExceptionStandardSchemeFactory implements SchemeFactory { +- public UnavailableExceptionStandardScheme getScheme() { +- return new UnavailableExceptionStandardScheme(); +- } +- } +- +- private 
static class UnavailableExceptionStandardScheme extends StandardScheme { +- +- public void read(org.apache.thrift.protocol.TProtocol iprot, UnavailableException struct) throws org.apache.thrift.TException { +- org.apache.thrift.protocol.TField schemeField; +- iprot.readStructBegin(); +- while (true) +- { +- schemeField = iprot.readFieldBegin(); +- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { +- break; +- } +- switch (schemeField.id) { +- default: +- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); +- } +- iprot.readFieldEnd(); +- } +- iprot.readStructEnd(); +- +- // check for required fields of primitive type, which can't be checked in the validate method +- struct.validate(); +- } +- +- public void write(org.apache.thrift.protocol.TProtocol oprot, UnavailableException struct) throws org.apache.thrift.TException { +- struct.validate(); +- +- oprot.writeStructBegin(STRUCT_DESC); +- oprot.writeFieldStop(); +- oprot.writeStructEnd(); +- } +- +- } +- +- private static class UnavailableExceptionTupleSchemeFactory implements SchemeFactory { +- public UnavailableExceptionTupleScheme getScheme() { +- return new UnavailableExceptionTupleScheme(); +- } +- } +- +- private static class UnavailableExceptionTupleScheme extends TupleScheme { +- +- @Override +- public void write(org.apache.thrift.protocol.TProtocol prot, UnavailableException struct) throws org.apache.thrift.TException { +- TTupleProtocol oprot = (TTupleProtocol) prot; +- } +- +- @Override +- public void read(org.apache.thrift.protocol.TProtocol prot, UnavailableException struct) throws org.apache.thrift.TException { +- TTupleProtocol iprot = (TTupleProtocol) prot; +- } +- } +- +-} +- +diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/cassandraConstants.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/cassandraConstants.java +deleted file mode 100644 +index f84243e..0000000 +--- 
a/interface/thrift/gen-java/org/apache/cassandra/thrift/cassandraConstants.java ++++ /dev/null +@@ -1,61 +0,0 @@ +-/** +- * Autogenerated by Thrift Compiler (0.9.1) +- * +- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +- * @generated +- */ +-package org.apache.cassandra.thrift; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. 
+- * +- */ +- +- +-import org.apache.commons.lang3.builder.HashCodeBuilder; +-import org.apache.thrift.scheme.IScheme; +-import org.apache.thrift.scheme.SchemeFactory; +-import org.apache.thrift.scheme.StandardScheme; +- +-import org.apache.thrift.scheme.TupleScheme; +-import org.apache.thrift.protocol.TTupleProtocol; +-import org.apache.thrift.protocol.TProtocolException; +-import org.apache.thrift.EncodingUtils; +-import org.apache.thrift.TException; +-import org.apache.thrift.async.AsyncMethodCallback; +-import org.apache.thrift.server.AbstractNonblockingServer.*; +-import java.util.List; +-import java.util.ArrayList; +-import java.util.Map; +-import java.util.HashMap; +-import java.util.EnumMap; +-import java.util.Set; +-import java.util.HashSet; +-import java.util.EnumSet; +-import java.util.Collections; +-import java.util.BitSet; +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-public class cassandraConstants { +- +- public static final String VERSION = "20.1.0"; +- +-} +diff --git a/lib/licenses/disruptor-3.0.1.txt b/lib/licenses/disruptor-3.0.1.txt +deleted file mode 100644 +index 50086f8..0000000 +--- a/lib/licenses/disruptor-3.0.1.txt ++++ /dev/null +@@ -1,201 +0,0 @@ +- Apache License +- Version 2.0, January 2004 +- http://www.apache.org/licenses/ +- +- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION +- +- 1. Definitions. +- +- "License" shall mean the terms and conditions for use, reproduction, +- and distribution as defined by Sections 1 through 9 of this document. +- +- "Licensor" shall mean the copyright owner or entity authorized by +- the copyright owner that is granting the License. +- +- "Legal Entity" shall mean the union of the acting entity and all +- other entities that control, are controlled by, or are under common +- control with that entity. 
For the purposes of this definition, +- "control" means (i) the power, direct or indirect, to cause the +- direction or management of such entity, whether by contract or +- otherwise, or (ii) ownership of fifty percent (50%) or more of the +- outstanding shares, or (iii) beneficial ownership of such entity. +- +- "You" (or "Your") shall mean an individual or Legal Entity +- exercising permissions granted by this License. +- +- "Source" form shall mean the preferred form for making modifications, +- including but not limited to software source code, documentation +- source, and configuration files. +- +- "Object" form shall mean any form resulting from mechanical +- transformation or translation of a Source form, including but +- not limited to compiled object code, generated documentation, +- and conversions to other media types. +- +- "Work" shall mean the work of authorship, whether in Source or +- Object form, made available under the License, as indicated by a +- copyright notice that is included in or attached to the work +- (an example is provided in the Appendix below). +- +- "Derivative Works" shall mean any work, whether in Source or Object +- form, that is based on (or derived from) the Work and for which the +- editorial revisions, annotations, elaborations, or other modifications +- represent, as a whole, an original work of authorship. For the purposes +- of this License, Derivative Works shall not include works that remain +- separable from, or merely link (or bind by name) to the interfaces of, +- the Work and Derivative Works thereof. +- +- "Contribution" shall mean any work of authorship, including +- the original version of the Work and any modifications or additions +- to that Work or Derivative Works thereof, that is intentionally +- submitted to Licensor for inclusion in the Work by the copyright owner +- or by an individual or Legal Entity authorized to submit on behalf of +- the copyright owner. 
For the purposes of this definition, "submitted" +- means any form of electronic, verbal, or written communication sent +- to the Licensor or its representatives, including but not limited to +- communication on electronic mailing lists, source code control systems, +- and issue tracking systems that are managed by, or on behalf of, the +- Licensor for the purpose of discussing and improving the Work, but +- excluding communication that is conspicuously marked or otherwise +- designated in writing by the copyright owner as "Not a Contribution." +- +- "Contributor" shall mean Licensor and any individual or Legal Entity +- on behalf of whom a Contribution has been received by Licensor and +- subsequently incorporated within the Work. +- +- 2. Grant of Copyright License. Subject to the terms and conditions of +- this License, each Contributor hereby grants to You a perpetual, +- worldwide, non-exclusive, no-charge, royalty-free, irrevocable +- copyright license to reproduce, prepare Derivative Works of, +- publicly display, publicly perform, sublicense, and distribute the +- Work and such Derivative Works in Source or Object form. +- +- 3. Grant of Patent License. Subject to the terms and conditions of +- this License, each Contributor hereby grants to You a perpetual, +- worldwide, non-exclusive, no-charge, royalty-free, irrevocable +- (except as stated in this section) patent license to make, have made, +- use, offer to sell, sell, import, and otherwise transfer the Work, +- where such license applies only to those patent claims licensable +- by such Contributor that are necessarily infringed by their +- Contribution(s) alone or by combination of their Contribution(s) +- with the Work to which such Contribution(s) was submitted. 
If You +- institute patent litigation against any entity (including a +- cross-claim or counterclaim in a lawsuit) alleging that the Work +- or a Contribution incorporated within the Work constitutes direct +- or contributory patent infringement, then any patent licenses +- granted to You under this License for that Work shall terminate +- as of the date such litigation is filed. +- +- 4. Redistribution. You may reproduce and distribute copies of the +- Work or Derivative Works thereof in any medium, with or without +- modifications, and in Source or Object form, provided that You +- meet the following conditions: +- +- (a) You must give any other recipients of the Work or +- Derivative Works a copy of this License; and +- +- (b) You must cause any modified files to carry prominent notices +- stating that You changed the files; and +- +- (c) You must retain, in the Source form of any Derivative Works +- that You distribute, all copyright, patent, trademark, and +- attribution notices from the Source form of the Work, +- excluding those notices that do not pertain to any part of +- the Derivative Works; and +- +- (d) If the Work includes a "NOTICE" text file as part of its +- distribution, then any Derivative Works that You distribute must +- include a readable copy of the attribution notices contained +- within such NOTICE file, excluding those notices that do not +- pertain to any part of the Derivative Works, in at least one +- of the following places: within a NOTICE text file distributed +- as part of the Derivative Works; within the Source form or +- documentation, if provided along with the Derivative Works; or, +- within a display generated by the Derivative Works, if and +- wherever such third-party notices normally appear. The contents +- of the NOTICE file are for informational purposes only and +- do not modify the License. 
You may add Your own attribution +- notices within Derivative Works that You distribute, alongside +- or as an addendum to the NOTICE text from the Work, provided +- that such additional attribution notices cannot be construed +- as modifying the License. +- +- You may add Your own copyright statement to Your modifications and +- may provide additional or different license terms and conditions +- for use, reproduction, or distribution of Your modifications, or +- for any such Derivative Works as a whole, provided Your use, +- reproduction, and distribution of the Work otherwise complies with +- the conditions stated in this License. +- +- 5. Submission of Contributions. Unless You explicitly state otherwise, +- any Contribution intentionally submitted for inclusion in the Work +- by You to the Licensor shall be under the terms and conditions of +- this License, without any additional terms or conditions. +- Notwithstanding the above, nothing herein shall supersede or modify +- the terms of any separate license agreement you may have executed +- with Licensor regarding such Contributions. +- +- 6. Trademarks. This License does not grant permission to use the trade +- names, trademarks, service marks, or product names of the Licensor, +- except as required for reasonable and customary use in describing the +- origin of the Work and reproducing the content of the NOTICE file. +- +- 7. Disclaimer of Warranty. Unless required by applicable law or +- agreed to in writing, Licensor provides the Work (and each +- Contributor provides its Contributions) on an "AS IS" BASIS, +- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +- implied, including, without limitation, any warranties or conditions +- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +- PARTICULAR PURPOSE. 
You are solely responsible for determining the +- appropriateness of using or redistributing the Work and assume any +- risks associated with Your exercise of permissions under this License. +- +- 8. Limitation of Liability. In no event and under no legal theory, +- whether in tort (including negligence), contract, or otherwise, +- unless required by applicable law (such as deliberate and grossly +- negligent acts) or agreed to in writing, shall any Contributor be +- liable to You for damages, including any direct, indirect, special, +- incidental, or consequential damages of any character arising as a +- result of this License or out of the use or inability to use the +- Work (including but not limited to damages for loss of goodwill, +- work stoppage, computer failure or malfunction, or any and all +- other commercial damages or losses), even if such Contributor +- has been advised of the possibility of such damages. +- +- 9. Accepting Warranty or Additional Liability. While redistributing +- the Work or Derivative Works thereof, You may choose to offer, +- and charge a fee for, acceptance of support, warranty, indemnity, +- or other liability obligations and/or rights consistent with this +- License. However, in accepting such obligations, You may act only +- on Your own behalf and on Your sole responsibility, not on behalf +- of any other Contributor, and only if You agree to indemnify, +- defend, and hold each Contributor harmless for any liability +- incurred by, or claims asserted against, such Contributor by reason +- of your accepting any such warranty or additional liability. +- +- END OF TERMS AND CONDITIONS +- +- APPENDIX: How to apply the Apache License to your work. +- +- To apply the Apache License to your work, attach the following +- boilerplate notice, with the fields enclosed by brackets "[]" +- replaced with your own identifying information. (Don't include +- the brackets!) 
The text should be enclosed in the appropriate +- comment syntax for the file format. We also recommend that a +- file or class name and description of purpose be included on the +- same "printed page" as the copyright notice for easier +- identification within third-party archives. +- +- Copyright [yyyy] [name of copyright owner] +- +- Licensed under the Apache License, Version 2.0 (the "License"); +- you may not use this file except in compliance with the License. +- You may obtain a copy of the License at +- +- http://www.apache.org/licenses/LICENSE-2.0 +- +- Unless required by applicable law or agreed to in writing, software +- distributed under the License is distributed on an "AS IS" BASIS, +- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- See the License for the specific language governing permissions and +- limitations under the License. +\ No newline at end of file +diff --git a/lib/licenses/libthrift-0.9.2.txt b/lib/licenses/libthrift-0.9.2.txt +deleted file mode 100644 +index d645695..0000000 +--- a/lib/licenses/libthrift-0.9.2.txt ++++ /dev/null +@@ -1,202 +0,0 @@ +- +- Apache License +- Version 2.0, January 2004 +- http://www.apache.org/licenses/ +- +- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION +- +- 1. Definitions. +- +- "License" shall mean the terms and conditions for use, reproduction, +- and distribution as defined by Sections 1 through 9 of this document. +- +- "Licensor" shall mean the copyright owner or entity authorized by +- the copyright owner that is granting the License. +- +- "Legal Entity" shall mean the union of the acting entity and all +- other entities that control, are controlled by, or are under common +- control with that entity. 
For the purposes of this definition, +- "control" means (i) the power, direct or indirect, to cause the +- direction or management of such entity, whether by contract or +- otherwise, or (ii) ownership of fifty percent (50%) or more of the +- outstanding shares, or (iii) beneficial ownership of such entity. +- +- "You" (or "Your") shall mean an individual or Legal Entity +- exercising permissions granted by this License. +- +- "Source" form shall mean the preferred form for making modifications, +- including but not limited to software source code, documentation +- source, and configuration files. +- +- "Object" form shall mean any form resulting from mechanical +- transformation or translation of a Source form, including but +- not limited to compiled object code, generated documentation, +- and conversions to other media types. +- +- "Work" shall mean the work of authorship, whether in Source or +- Object form, made available under the License, as indicated by a +- copyright notice that is included in or attached to the work +- (an example is provided in the Appendix below). +- +- "Derivative Works" shall mean any work, whether in Source or Object +- form, that is based on (or derived from) the Work and for which the +- editorial revisions, annotations, elaborations, or other modifications +- represent, as a whole, an original work of authorship. For the purposes +- of this License, Derivative Works shall not include works that remain +- separable from, or merely link (or bind by name) to the interfaces of, +- the Work and Derivative Works thereof. +- +- "Contribution" shall mean any work of authorship, including +- the original version of the Work and any modifications or additions +- to that Work or Derivative Works thereof, that is intentionally +- submitted to Licensor for inclusion in the Work by the copyright owner +- or by an individual or Legal Entity authorized to submit on behalf of +- the copyright owner. 
For the purposes of this definition, "submitted" +- means any form of electronic, verbal, or written communication sent +- to the Licensor or its representatives, including but not limited to +- communication on electronic mailing lists, source code control systems, +- and issue tracking systems that are managed by, or on behalf of, the +- Licensor for the purpose of discussing and improving the Work, but +- excluding communication that is conspicuously marked or otherwise +- designated in writing by the copyright owner as "Not a Contribution." +- +- "Contributor" shall mean Licensor and any individual or Legal Entity +- on behalf of whom a Contribution has been received by Licensor and +- subsequently incorporated within the Work. +- +- 2. Grant of Copyright License. Subject to the terms and conditions of +- this License, each Contributor hereby grants to You a perpetual, +- worldwide, non-exclusive, no-charge, royalty-free, irrevocable +- copyright license to reproduce, prepare Derivative Works of, +- publicly display, publicly perform, sublicense, and distribute the +- Work and such Derivative Works in Source or Object form. +- +- 3. Grant of Patent License. Subject to the terms and conditions of +- this License, each Contributor hereby grants to You a perpetual, +- worldwide, non-exclusive, no-charge, royalty-free, irrevocable +- (except as stated in this section) patent license to make, have made, +- use, offer to sell, sell, import, and otherwise transfer the Work, +- where such license applies only to those patent claims licensable +- by such Contributor that are necessarily infringed by their +- Contribution(s) alone or by combination of their Contribution(s) +- with the Work to which such Contribution(s) was submitted. 
If You +- institute patent litigation against any entity (including a +- cross-claim or counterclaim in a lawsuit) alleging that the Work +- or a Contribution incorporated within the Work constitutes direct +- or contributory patent infringement, then any patent licenses +- granted to You under this License for that Work shall terminate +- as of the date such litigation is filed. +- +- 4. Redistribution. You may reproduce and distribute copies of the +- Work or Derivative Works thereof in any medium, with or without +- modifications, and in Source or Object form, provided that You +- meet the following conditions: +- +- (a) You must give any other recipients of the Work or +- Derivative Works a copy of this License; and +- +- (b) You must cause any modified files to carry prominent notices +- stating that You changed the files; and +- +- (c) You must retain, in the Source form of any Derivative Works +- that You distribute, all copyright, patent, trademark, and +- attribution notices from the Source form of the Work, +- excluding those notices that do not pertain to any part of +- the Derivative Works; and +- +- (d) If the Work includes a "NOTICE" text file as part of its +- distribution, then any Derivative Works that You distribute must +- include a readable copy of the attribution notices contained +- within such NOTICE file, excluding those notices that do not +- pertain to any part of the Derivative Works, in at least one +- of the following places: within a NOTICE text file distributed +- as part of the Derivative Works; within the Source form or +- documentation, if provided along with the Derivative Works; or, +- within a display generated by the Derivative Works, if and +- wherever such third-party notices normally appear. The contents +- of the NOTICE file are for informational purposes only and +- do not modify the License. 
You may add Your own attribution +- notices within Derivative Works that You distribute, alongside +- or as an addendum to the NOTICE text from the Work, provided +- that such additional attribution notices cannot be construed +- as modifying the License. +- +- You may add Your own copyright statement to Your modifications and +- may provide additional or different license terms and conditions +- for use, reproduction, or distribution of Your modifications, or +- for any such Derivative Works as a whole, provided Your use, +- reproduction, and distribution of the Work otherwise complies with +- the conditions stated in this License. +- +- 5. Submission of Contributions. Unless You explicitly state otherwise, +- any Contribution intentionally submitted for inclusion in the Work +- by You to the Licensor shall be under the terms and conditions of +- this License, without any additional terms or conditions. +- Notwithstanding the above, nothing herein shall supersede or modify +- the terms of any separate license agreement you may have executed +- with Licensor regarding such Contributions. +- +- 6. Trademarks. This License does not grant permission to use the trade +- names, trademarks, service marks, or product names of the Licensor, +- except as required for reasonable and customary use in describing the +- origin of the Work and reproducing the content of the NOTICE file. +- +- 7. Disclaimer of Warranty. Unless required by applicable law or +- agreed to in writing, Licensor provides the Work (and each +- Contributor provides its Contributions) on an "AS IS" BASIS, +- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +- implied, including, without limitation, any warranties or conditions +- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +- PARTICULAR PURPOSE. 
You are solely responsible for determining the +- appropriateness of using or redistributing the Work and assume any +- risks associated with Your exercise of permissions under this License. +- +- 8. Limitation of Liability. In no event and under no legal theory, +- whether in tort (including negligence), contract, or otherwise, +- unless required by applicable law (such as deliberate and grossly +- negligent acts) or agreed to in writing, shall any Contributor be +- liable to You for damages, including any direct, indirect, special, +- incidental, or consequential damages of any character arising as a +- result of this License or out of the use or inability to use the +- Work (including but not limited to damages for loss of goodwill, +- work stoppage, computer failure or malfunction, or any and all +- other commercial damages or losses), even if such Contributor +- has been advised of the possibility of such damages. +- +- 9. Accepting Warranty or Additional Liability. While redistributing +- the Work or Derivative Works thereof, You may choose to offer, +- and charge a fee for, acceptance of support, warranty, indemnity, +- or other liability obligations and/or rights consistent with this +- License. However, in accepting such obligations, You may act only +- on Your own behalf and on Your sole responsibility, not on behalf +- of any other Contributor, and only if You agree to indemnify, +- defend, and hold each Contributor harmless for any liability +- incurred by, or claims asserted against, such Contributor by reason +- of your accepting any such warranty or additional liability. +- +- END OF TERMS AND CONDITIONS +- +- APPENDIX: How to apply the Apache License to your work. +- +- To apply the Apache License to your work, attach the following +- boilerplate notice, with the fields enclosed by brackets "[]" +- replaced with your own identifying information. (Don't include +- the brackets!) 
The text should be enclosed in the appropriate +- comment syntax for the file format. We also recommend that a +- file or class name and description of purpose be included on the +- same "printed page" as the copyright notice for easier +- identification within third-party archives. +- +- Copyright [yyyy] [name of copyright owner] +- +- Licensed under the Apache License, Version 2.0 (the "License"); +- you may not use this file except in compliance with the License. +- You may obtain a copy of the License at +- +- http://www.apache.org/licenses/LICENSE-2.0 +- +- Unless required by applicable law or agreed to in writing, software +- distributed under the License is distributed on an "AS IS" BASIS, +- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- See the License for the specific language governing permissions and +- limitations under the License. +diff --git a/lib/licenses/thrift-server-0.3.7.txt b/lib/licenses/thrift-server-0.3.7.txt +deleted file mode 100644 +index d645695..0000000 +--- a/lib/licenses/thrift-server-0.3.7.txt ++++ /dev/null +@@ -1,202 +0,0 @@ +- +- Apache License +- Version 2.0, January 2004 +- http://www.apache.org/licenses/ +- +- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION +- +- 1. Definitions. +- +- "License" shall mean the terms and conditions for use, reproduction, +- and distribution as defined by Sections 1 through 9 of this document. +- +- "Licensor" shall mean the copyright owner or entity authorized by +- the copyright owner that is granting the License. +- +- "Legal Entity" shall mean the union of the acting entity and all +- other entities that control, are controlled by, or are under common +- control with that entity. 
For the purposes of this definition, +- "control" means (i) the power, direct or indirect, to cause the +- direction or management of such entity, whether by contract or +- otherwise, or (ii) ownership of fifty percent (50%) or more of the +- outstanding shares, or (iii) beneficial ownership of such entity. +- +- "You" (or "Your") shall mean an individual or Legal Entity +- exercising permissions granted by this License. +- +- "Source" form shall mean the preferred form for making modifications, +- including but not limited to software source code, documentation +- source, and configuration files. +- +- "Object" form shall mean any form resulting from mechanical +- transformation or translation of a Source form, including but +- not limited to compiled object code, generated documentation, +- and conversions to other media types. +- +- "Work" shall mean the work of authorship, whether in Source or +- Object form, made available under the License, as indicated by a +- copyright notice that is included in or attached to the work +- (an example is provided in the Appendix below). +- +- "Derivative Works" shall mean any work, whether in Source or Object +- form, that is based on (or derived from) the Work and for which the +- editorial revisions, annotations, elaborations, or other modifications +- represent, as a whole, an original work of authorship. For the purposes +- of this License, Derivative Works shall not include works that remain +- separable from, or merely link (or bind by name) to the interfaces of, +- the Work and Derivative Works thereof. +- +- "Contribution" shall mean any work of authorship, including +- the original version of the Work and any modifications or additions +- to that Work or Derivative Works thereof, that is intentionally +- submitted to Licensor for inclusion in the Work by the copyright owner +- or by an individual or Legal Entity authorized to submit on behalf of +- the copyright owner. 
For the purposes of this definition, "submitted" +- means any form of electronic, verbal, or written communication sent +- to the Licensor or its representatives, including but not limited to +- communication on electronic mailing lists, source code control systems, +- and issue tracking systems that are managed by, or on behalf of, the +- Licensor for the purpose of discussing and improving the Work, but +- excluding communication that is conspicuously marked or otherwise +- designated in writing by the copyright owner as "Not a Contribution." +- +- "Contributor" shall mean Licensor and any individual or Legal Entity +- on behalf of whom a Contribution has been received by Licensor and +- subsequently incorporated within the Work. +- +- 2. Grant of Copyright License. Subject to the terms and conditions of +- this License, each Contributor hereby grants to You a perpetual, +- worldwide, non-exclusive, no-charge, royalty-free, irrevocable +- copyright license to reproduce, prepare Derivative Works of, +- publicly display, publicly perform, sublicense, and distribute the +- Work and such Derivative Works in Source or Object form. +- +- 3. Grant of Patent License. Subject to the terms and conditions of +- this License, each Contributor hereby grants to You a perpetual, +- worldwide, non-exclusive, no-charge, royalty-free, irrevocable +- (except as stated in this section) patent license to make, have made, +- use, offer to sell, sell, import, and otherwise transfer the Work, +- where such license applies only to those patent claims licensable +- by such Contributor that are necessarily infringed by their +- Contribution(s) alone or by combination of their Contribution(s) +- with the Work to which such Contribution(s) was submitted. 
If You +- institute patent litigation against any entity (including a +- cross-claim or counterclaim in a lawsuit) alleging that the Work +- or a Contribution incorporated within the Work constitutes direct +- or contributory patent infringement, then any patent licenses +- granted to You under this License for that Work shall terminate +- as of the date such litigation is filed. +- +- 4. Redistribution. You may reproduce and distribute copies of the +- Work or Derivative Works thereof in any medium, with or without +- modifications, and in Source or Object form, provided that You +- meet the following conditions: +- +- (a) You must give any other recipients of the Work or +- Derivative Works a copy of this License; and +- +- (b) You must cause any modified files to carry prominent notices +- stating that You changed the files; and +- +- (c) You must retain, in the Source form of any Derivative Works +- that You distribute, all copyright, patent, trademark, and +- attribution notices from the Source form of the Work, +- excluding those notices that do not pertain to any part of +- the Derivative Works; and +- +- (d) If the Work includes a "NOTICE" text file as part of its +- distribution, then any Derivative Works that You distribute must +- include a readable copy of the attribution notices contained +- within such NOTICE file, excluding those notices that do not +- pertain to any part of the Derivative Works, in at least one +- of the following places: within a NOTICE text file distributed +- as part of the Derivative Works; within the Source form or +- documentation, if provided along with the Derivative Works; or, +- within a display generated by the Derivative Works, if and +- wherever such third-party notices normally appear. The contents +- of the NOTICE file are for informational purposes only and +- do not modify the License. 
You may add Your own attribution +- notices within Derivative Works that You distribute, alongside +- or as an addendum to the NOTICE text from the Work, provided +- that such additional attribution notices cannot be construed +- as modifying the License. +- +- You may add Your own copyright statement to Your modifications and +- may provide additional or different license terms and conditions +- for use, reproduction, or distribution of Your modifications, or +- for any such Derivative Works as a whole, provided Your use, +- reproduction, and distribution of the Work otherwise complies with +- the conditions stated in this License. +- +- 5. Submission of Contributions. Unless You explicitly state otherwise, +- any Contribution intentionally submitted for inclusion in the Work +- by You to the Licensor shall be under the terms and conditions of +- this License, without any additional terms or conditions. +- Notwithstanding the above, nothing herein shall supersede or modify +- the terms of any separate license agreement you may have executed +- with Licensor regarding such Contributions. +- +- 6. Trademarks. This License does not grant permission to use the trade +- names, trademarks, service marks, or product names of the Licensor, +- except as required for reasonable and customary use in describing the +- origin of the Work and reproducing the content of the NOTICE file. +- +- 7. Disclaimer of Warranty. Unless required by applicable law or +- agreed to in writing, Licensor provides the Work (and each +- Contributor provides its Contributions) on an "AS IS" BASIS, +- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +- implied, including, without limitation, any warranties or conditions +- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +- PARTICULAR PURPOSE. 
You are solely responsible for determining the +- appropriateness of using or redistributing the Work and assume any +- risks associated with Your exercise of permissions under this License. +- +- 8. Limitation of Liability. In no event and under no legal theory, +- whether in tort (including negligence), contract, or otherwise, +- unless required by applicable law (such as deliberate and grossly +- negligent acts) or agreed to in writing, shall any Contributor be +- liable to You for damages, including any direct, indirect, special, +- incidental, or consequential damages of any character arising as a +- result of this License or out of the use or inability to use the +- Work (including but not limited to damages for loss of goodwill, +- work stoppage, computer failure or malfunction, or any and all +- other commercial damages or losses), even if such Contributor +- has been advised of the possibility of such damages. +- +- 9. Accepting Warranty or Additional Liability. While redistributing +- the Work or Derivative Works thereof, You may choose to offer, +- and charge a fee for, acceptance of support, warranty, indemnity, +- or other liability obligations and/or rights consistent with this +- License. However, in accepting such obligations, You may act only +- on Your own behalf and on Your sole responsibility, not on behalf +- of any other Contributor, and only if You agree to indemnify, +- defend, and hold each Contributor harmless for any liability +- incurred by, or claims asserted against, such Contributor by reason +- of your accepting any such warranty or additional liability. +- +- END OF TERMS AND CONDITIONS +- +- APPENDIX: How to apply the Apache License to your work. +- +- To apply the Apache License to your work, attach the following +- boilerplate notice, with the fields enclosed by brackets "[]" +- replaced with your own identifying information. (Don't include +- the brackets!) 
The text should be enclosed in the appropriate +- comment syntax for the file format. We also recommend that a +- file or class name and description of purpose be included on the +- same "printed page" as the copyright notice for easier +- identification within third-party archives. +- +- Copyright [yyyy] [name of copyright owner] +- +- Licensed under the Apache License, Version 2.0 (the "License"); +- you may not use this file except in compliance with the License. +- You may obtain a copy of the License at +- +- http://www.apache.org/licenses/LICENSE-2.0 +- +- Unless required by applicable law or agreed to in writing, software +- distributed under the License is distributed on an "AS IS" BASIS, +- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- See the License for the specific language governing permissions and +- limitations under the License. +diff --git a/pylib/cqlshlib/test/test_cql_parsing.py b/pylib/cqlshlib/test/test_cql_parsing.py +index ad60c9b..7376fbf 100644 +--- a/pylib/cqlshlib/test/test_cql_parsing.py ++++ b/pylib/cqlshlib/test/test_cql_parsing.py +@@ -15,7 +15,7 @@ + # limitations under the License. + + # to configure behavior, define $CQL_TEST_HOST to the destination address +-# for Thrift connections, and $CQL_TEST_PORT to the associated port. ++# and $CQL_TEST_PORT to the associated port. + + from unittest import TestCase + from operator import itemgetter +diff --git a/pylib/cqlshlib/test/test_cqlsh_commands.py b/pylib/cqlshlib/test/test_cqlsh_commands.py +index 0b12882..e4fe0bc 100644 +--- a/pylib/cqlshlib/test/test_cqlsh_commands.py ++++ b/pylib/cqlshlib/test/test_cqlsh_commands.py +@@ -15,7 +15,7 @@ + # limitations under the License. + + # to configure behavior, define $CQL_TEST_HOST to the destination address +-# for Thrift connections, and $CQL_TEST_PORT to the associated port. ++# and $CQL_TEST_PORT to the associated port. 
+ + from .basecase import BaseTestCase, cqlsh + +diff --git a/pylib/cqlshlib/test/test_cqlsh_completion.py b/pylib/cqlshlib/test/test_cqlsh_completion.py +index 21eb088..67d4ffc 100644 +--- a/pylib/cqlshlib/test/test_cqlsh_completion.py ++++ b/pylib/cqlshlib/test/test_cqlsh_completion.py +@@ -15,7 +15,7 @@ + # limitations under the License. + + # to configure behavior, define $CQL_TEST_HOST to the destination address +-# for Thrift connections, and $CQL_TEST_PORT to the associated port. ++# and $CQL_TEST_PORT to the associated port. + + from __future__ import with_statement + +diff --git a/pylib/cqlshlib/test/test_cqlsh_invocation.py b/pylib/cqlshlib/test/test_cqlsh_invocation.py +index 67fa76f..8431980 100644 +--- a/pylib/cqlshlib/test/test_cqlsh_invocation.py ++++ b/pylib/cqlshlib/test/test_cqlsh_invocation.py +@@ -15,7 +15,7 @@ + # limitations under the License. + + # to configure behavior, define $CQL_TEST_HOST to the destination address +-# for Thrift connections, and $CQL_TEST_PORT to the associated port. ++# and $CQL_TEST_PORT to the associated port. + + from .basecase import BaseTestCase + +diff --git a/pylib/cqlshlib/test/test_cqlsh_output.py b/pylib/cqlshlib/test/test_cqlsh_output.py +index 8dba651..dcbb81b 100644 +--- a/pylib/cqlshlib/test/test_cqlsh_output.py ++++ b/pylib/cqlshlib/test/test_cqlsh_output.py +@@ -15,7 +15,7 @@ + # limitations under the License. + + # to configure behavior, define $CQL_TEST_HOST to the destination address +-# for Thrift connections, and $CQL_TEST_PORT to the associated port. ++# and $CQL_TEST_PORT to the associated port. + + from __future__ import with_statement + +diff --git a/pylib/cqlshlib/test/test_cqlsh_parsing.py b/pylib/cqlshlib/test/test_cqlsh_parsing.py +index 7e7f08b..9c37dd9 100644 +--- a/pylib/cqlshlib/test/test_cqlsh_parsing.py ++++ b/pylib/cqlshlib/test/test_cqlsh_parsing.py +@@ -15,7 +15,7 @@ + # limitations under the License. 
+ + # to configure behavior, define $CQL_TEST_HOST to the destination address +-# for Thrift connections, and $CQL_TEST_PORT to the associated port. ++# and $CQL_TEST_PORT to the associated port. + + from unittest import TestCase + +diff --git a/src/java/org/apache/cassandra/auth/IAuthenticator.java b/src/java/org/apache/cassandra/auth/IAuthenticator.java +index ccbdb75..9eb50a7 100644 +--- a/src/java/org/apache/cassandra/auth/IAuthenticator.java ++++ b/src/java/org/apache/cassandra/auth/IAuthenticator.java +@@ -65,12 +65,10 @@ public interface IAuthenticator + SaslNegotiator newSaslNegotiator(InetAddress clientAddress); + + /** +- * For implementations which support the Thrift login method that accepts arbitrary +- * key/value pairs containing credentials data. +- * Also used by CQL native protocol v1, in which username and password are sent from +- * client to server in a {@link org.apache.cassandra.transport.messages.CredentialsMessage} +- * Implementations where support for Thrift and CQL protocol v1 is not required should make +- * this an unsupported operation. ++ * A legacy method that is still used by JMX authentication. ++ * ++ * You should implement this for having JMX authentication through your ++ * authenticator. + * + * Should never return null - always throw AuthenticationException instead. + * Returning AuthenticatedUser.ANONYMOUS_USER is an option as well if authentication is not required. 
+diff --git a/src/java/org/apache/cassandra/cache/CounterCacheKey.java b/src/java/org/apache/cassandra/cache/CounterCacheKey.java +index 8b173bf..0e5f037 100644 +--- a/src/java/org/apache/cassandra/cache/CounterCacheKey.java ++++ b/src/java/org/apache/cassandra/cache/CounterCacheKey.java +@@ -17,27 +17,42 @@ + */ + package org.apache.cassandra.cache; + ++import java.io.IOException; + import java.nio.ByteBuffer; + import java.util.Arrays; ++import java.util.List; + ++import org.apache.cassandra.config.CFMetaData; + import org.apache.cassandra.config.ColumnDefinition; + import org.apache.cassandra.db.*; ++import org.apache.cassandra.db.filter.ClusteringIndexFilter; ++import org.apache.cassandra.db.filter.ClusteringIndexNamesFilter; ++import org.apache.cassandra.db.filter.ColumnFilter; + import org.apache.cassandra.db.rows.CellPath; + import org.apache.cassandra.db.marshal.CompositeType; ++import org.apache.cassandra.db.rows.RowIterator; ++import org.apache.cassandra.db.rows.UnfilteredRowIterators; ++import org.apache.cassandra.io.util.DataInputPlus; ++import org.apache.cassandra.io.util.DataOutputPlus; + import org.apache.cassandra.utils.*; + + public final class CounterCacheKey extends CacheKey + { + private static final long EMPTY_SIZE = ObjectSizes.measure(new CounterCacheKey(null, ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBuffer.allocate(1))); + +- public final byte[] partitionKey; +- public final byte[] cellName; ++ private final byte[] partitionKey; ++ private final byte[] cellName; + +- public CounterCacheKey(Pair ksAndCFName, ByteBuffer partitionKey, ByteBuffer cellName) ++ public CounterCacheKey(Pair ksAndCFName, byte[] partitionKey, byte[] cellName) + { + super(ksAndCFName); +- this.partitionKey = ByteBufferUtil.getArray(partitionKey); +- this.cellName = ByteBufferUtil.getArray(cellName); ++ this.partitionKey = partitionKey; ++ this.cellName = cellName; ++ } ++ ++ private CounterCacheKey(Pair ksAndCFName, ByteBuffer partitionKey, ByteBuffer cellName) ++ { ++ 
this(ksAndCFName, ByteBufferUtil.getArray(partitionKey), ByteBufferUtil.getArray(cellName)); + } + + public static CounterCacheKey create(Pair ksAndCFName, ByteBuffer partitionKey, Clustering clustering, ColumnDefinition c, CellPath path) +@@ -58,6 +73,75 @@ public final class CounterCacheKey extends CacheKey + return CompositeType.build(values); + } + ++ public ByteBuffer partitionKey() ++ { ++ return ByteBuffer.wrap(partitionKey); ++ } ++ ++ /** ++ * Reads the value of the counter represented by this key. ++ * ++ * @param cfs the store for the table this is a key of. ++ * @return the value for the counter represented by this key, or {@code null} if there ++ * is no such counter. ++ */ ++ public ByteBuffer readCounterValue(ColumnFamilyStore cfs) ++ { ++ CFMetaData metadata = cfs.metadata; ++ assert metadata.ksAndCFName.equals(ksAndCFName); ++ ++ DecoratedKey key = cfs.decorateKey(partitionKey()); ++ ++ int clusteringSize = metadata.comparator.size(); ++ List buffers = CompositeType.splitName(ByteBuffer.wrap(cellName)); ++ assert buffers.size() >= clusteringSize + 1; // See makeCellName above ++ ++ Clustering clustering = Clustering.make(buffers.subList(0, clusteringSize).toArray(new ByteBuffer[clusteringSize])); ++ ColumnDefinition column = metadata.getColumnDefinition(buffers.get(clusteringSize)); ++ // This can theoretically happen if a column is dropped after the cache is saved and we ++ // try to load it. No point in failing in any case, just skip the value. ++ if (column == null) ++ return null; ++ ++ CellPath path = column.isComplex() ? 
CellPath.create(buffers.get(buffers.size() - 1)) : null; ++ ++ int nowInSec = FBUtilities.nowInSeconds(); ++ ColumnFilter.Builder builder = ColumnFilter.selectionBuilder(); ++ if (path == null) ++ builder.add(column); ++ else ++ builder.select(column, path); ++ ++ ClusteringIndexFilter filter = new ClusteringIndexNamesFilter(FBUtilities.singleton(clustering, metadata.comparator), false); ++ SinglePartitionReadCommand cmd = SinglePartitionReadCommand.create(metadata, nowInSec, key, builder.build(), filter); ++ try (ReadExecutionController controller = cmd.executionController(); ++ RowIterator iter = UnfilteredRowIterators.filter(cmd.queryMemtableAndDisk(cfs, controller), nowInSec)) ++ { ++ ByteBuffer value = null; ++ if (column.isStatic()) ++ value = iter.staticRow().getCell(column).value(); ++ else if (iter.hasNext()) ++ value = iter.next().getCell(column).value(); ++ ++ return value; ++ } ++ } ++ ++ public void write(DataOutputPlus out) ++ throws IOException ++ { ++ ByteBufferUtil.writeWithLength(partitionKey, out); ++ ByteBufferUtil.writeWithLength(cellName, out); ++ } ++ ++ public static CounterCacheKey read(Pair ksAndCFName, DataInputPlus in) ++ throws IOException ++ { ++ return new CounterCacheKey(ksAndCFName, ++ ByteBufferUtil.readBytesWithLength(in), ++ ByteBufferUtil.readBytesWithLength(in)); ++ } ++ + public long unsharedHeapSize() + { + return EMPTY_SIZE +diff --git a/src/java/org/apache/cassandra/client/RingCache.java b/src/java/org/apache/cassandra/client/RingCache.java +deleted file mode 100644 +index 5196bce..0000000 +--- a/src/java/org/apache/cassandra/client/RingCache.java ++++ /dev/null +@@ -1,121 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. 
The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. +- */ +-package org.apache.cassandra.client; +- +-import java.io.IOException; +-import java.net.InetAddress; +-import java.net.UnknownHostException; +-import java.nio.ByteBuffer; +-import java.util.List; +- +-import org.apache.cassandra.dht.IPartitioner; +-import org.apache.cassandra.dht.Range; +-import org.apache.cassandra.dht.Token; +-import org.apache.cassandra.hadoop.ConfigHelper; +-import org.apache.cassandra.thrift.Cassandra; +-import org.apache.cassandra.thrift.TokenRange; +-import org.apache.hadoop.conf.Configuration; +-import org.apache.thrift.TException; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-import com.google.common.collect.ArrayListMultimap; +-import com.google.common.collect.Multimap; +- +-/** +- * A class for caching the ring map at the client. For usage example, see +- * test/unit/org.apache.cassandra.client.TestRingCache.java. 
+- * TODO: doing a naive linear search of the token map +- */ +-public class RingCache +-{ +- final private static Logger logger = LoggerFactory.getLogger(RingCache.class); +- +- private final IPartitioner partitioner; +- private final Configuration conf; +- +- private Multimap, InetAddress> rangeMap; +- +- public RingCache(Configuration conf) +- { +- this.conf = conf; +- this.partitioner = ConfigHelper.getOutputPartitioner(conf); +- refreshEndpointMap(); +- } +- +- public void refreshEndpointMap() +- { +- try +- { +- Cassandra.Client client = ConfigHelper.getClientFromOutputAddressList(conf); +- +- String keyspace = ConfigHelper.getOutputKeyspace(conf); +- List ring = ConfigHelper.getOutputLocalDCOnly(conf) +- ? client.describe_local_ring(keyspace) +- : client.describe_ring(keyspace); +- rangeMap = ArrayListMultimap.create(); +- +- for (TokenRange range : ring) +- { +- Token left = partitioner.getTokenFactory().fromString(range.start_token); +- Token right = partitioner.getTokenFactory().fromString(range.end_token); +- Range r = new Range(left, right); +- for (String host : range.endpoints) +- { +- try +- { +- rangeMap.put(r, InetAddress.getByName(host)); +- } catch (UnknownHostException e) +- { +- throw new AssertionError(e); // host strings are IPs +- } +- } +- } +- } +- catch (IOException e) +- { +- throw new RuntimeException(e); +- } +- catch (TException e) +- { +- logger.trace("Error contacting seed list {} {}", ConfigHelper.getOutputInitialAddress(conf), e.getMessage()); +- } +- } +- +- /** ListMultimap promises to return a List for get(K) */ +- public List getEndpoint(Range range) +- { +- return (List) rangeMap.get(range); +- } +- +- public List getEndpoint(ByteBuffer key) +- { +- return getEndpoint(getRange(key)); +- } +- +- public Range getRange(ByteBuffer key) +- { +- // TODO: naive linear search of the token map +- Token t = partitioner.getToken(key); +- for (Range range : rangeMap.keySet()) +- if (range.contains(t)) +- return range; +- +- throw new 
RuntimeException("Invalid token information returned by describe_ring: " + rangeMap); +- } +-} +diff --git a/src/java/org/apache/cassandra/config/CFMetaData.java b/src/java/org/apache/cassandra/config/CFMetaData.java +index 4de4f7b..1f5855f 100644 +--- a/src/java/org/apache/cassandra/config/CFMetaData.java ++++ b/src/java/org/apache/cassandra/config/CFMetaData.java +@@ -92,8 +92,6 @@ public final class CFMetaData + public volatile ClusteringComparator comparator; // bytes, long, timeuuid, utf8, etc. This is built directly from clusteringColumns + public final IPartitioner partitioner; // partitioner the table uses + +- private final Serializers serializers; +- + // non-final, for now + public volatile TableParams params = TableParams.DEFAULT; + +@@ -293,7 +291,6 @@ public final class CFMetaData + this.clusteringColumns = clusteringColumns; + this.partitionColumns = partitionColumns; + +- this.serializers = new Serializers(this); + this.resource = DataResource.table(ksName, cfName); + rebuild(); + } +@@ -553,15 +550,6 @@ public final class CFMetaData + return columnMetadata; + } + +- /** +- * +- * @return The name of the parent cf if this is a seconday index +- */ +- public String getParentColumnFamilyName() +- { +- return isIndex ? cfName.substring(0, cfName.indexOf('.')) : null; +- } +- + public ReadRepairDecision newReadRepairDecision() + { + double chance = ThreadLocalRandom.current().nextDouble(); +@@ -577,7 +565,7 @@ public final class CFMetaData + public AbstractType getColumnDefinitionNameComparator(ColumnDefinition.Kind kind) + { + return (isSuper() && kind == ColumnDefinition.Kind.REGULAR) || (isStaticCompactTable() && kind == ColumnDefinition.Kind.STATIC) +- ? thriftColumnNameType() ++ ? staticCompactOrSuperTableColumnNameType() + : UTF8Type.instance; + } + +@@ -593,7 +581,7 @@ public final class CFMetaData + + // An iterator over all column definitions but that respect the order of a SELECT *. 
+ // This also "hide" the clustering/regular columns for a non-CQL3 non-dense table for backward compatibility +- // sake (those are accessible through thrift but not through CQL currently). ++ // sake. + public Iterator allColumnsInSelectOrder() + { + final boolean isStaticCompactTable = isStaticCompactTable(); +@@ -757,9 +745,6 @@ public final class CFMetaData + + rebuild(); + +- // compaction thresholds are checked by ThriftValidation. We shouldn't be doing +- // validation on the apply path; it's too late for that. +- + params = cfm.params; + + keyValidator = cfm.keyValidator; +@@ -897,10 +882,17 @@ public final class CFMetaData + return this; + } + +- +- +- // The comparator to validate the definition name with thrift. +- public AbstractType thriftColumnNameType() ++ /** ++ * The type to use to compare column names in "static compact" ++ * tables or superColum ones. ++ *

++ * This exists because for historical reasons, "static compact" tables as ++ * well as super column ones can have non-UTF8 column names. ++ *

++ * This method should only be called for superColumn tables and "static ++ * compact" ones. For any other table, all column names are UTF8. ++ */ ++ public AbstractType staticCompactOrSuperTableColumnNameType() + { + if (isSuper()) + { +@@ -964,9 +956,9 @@ public final class CFMetaData + return removed; + } + +- public void recordColumnDrop(ColumnDefinition def) ++ public void recordColumnDrop(ColumnDefinition def, long timeMicros) + { +- droppedColumns.put(def.name.bytes, new DroppedColumn(def.name.toString(), def.type, FBUtilities.timestampMicros())); ++ droppedColumns.put(def.name.bytes, new DroppedColumn(def.name.toString(), def.type, timeMicros)); + } + + public void renameColumn(ColumnIdentifier from, ColumnIdentifier to) throws InvalidRequestException +@@ -978,7 +970,7 @@ public final class CFMetaData + if (getColumnDefinition(to) != null) + throw new InvalidRequestException(String.format("Cannot rename column %s to %s in keyspace %s; another column of that name already exist", from, to, cfName)); + +- if (def.isPartOfCellName(isCQLTable(), isSuper())) ++ if (!def.isPrimaryKeyColumn()) + { + throw new InvalidRequestException(String.format("Cannot rename non PRIMARY KEY part %s", from)); + } +@@ -1007,6 +999,18 @@ public final class CFMetaData + removeColumnDefinition(def); + } + ++ /** ++ * Records a deprecated column for a system table. ++ */ ++ public CFMetaData recordDeprecatedSystemColumn(String name, AbstractType type) ++ { ++ // As we play fast and loose with the removal timestamp, make sure this is not misused for a non system table. 
++ assert Schema.isSystemKeyspace(ksName); ++ ByteBuffer bb = ByteBufferUtil.bytes(name); ++ recordColumnDrop(ColumnDefinition.regularDef(this, bb, type), Long.MAX_VALUE); ++ return this; ++ } ++ + public boolean isCQLTable() + { + return !isSuper() && !isDense() && isCompound(); +@@ -1022,43 +1026,11 @@ public final class CFMetaData + return !isSuper() && !isDense() && !isCompound(); + } + +- /** +- * Returns whether this CFMetaData can be returned to thrift. +- */ +- public boolean isThriftCompatible() +- { +- return isCompactTable(); +- } +- + public boolean hasStaticColumns() + { + return !partitionColumns.statics.isEmpty(); + } + +- public boolean hasCollectionColumns() +- { +- for (ColumnDefinition def : partitionColumns()) +- if (def.type instanceof CollectionType && def.type.isMultiCell()) +- return true; +- return false; +- } +- +- public boolean hasComplexColumns() +- { +- for (ColumnDefinition def : partitionColumns()) +- if (def.isComplex()) +- return true; +- return false; +- } +- +- public boolean hasDroppedCollectionColumns() +- { +- for (DroppedColumn def : getDroppedColumns().values()) +- if (def.type instanceof CollectionType && def.type.isMultiCell()) +- return true; +- return false; +- } +- + public boolean isSuper() + { + return isSuper; +@@ -1087,18 +1059,6 @@ public final class CFMetaData + return isView; + } + +- public Serializers serializers() +- { +- return serializers; +- } +- +- public AbstractType makeLegacyDefaultValidator() +- { +- return isCounter() +- ? CounterColumnType.instance +- : (isCompactTable() ? 
compactValueColumn().type : BytesType.instance); +- } +- + public static Set flagsFromStrings(Set strings) + { + return strings.stream() +diff --git a/src/java/org/apache/cassandra/config/ColumnDefinition.java b/src/java/org/apache/cassandra/config/ColumnDefinition.java +index 713d684..6cc6aaa 100644 +--- a/src/java/org/apache/cassandra/config/ColumnDefinition.java ++++ b/src/java/org/apache/cassandra/config/ColumnDefinition.java +@@ -54,8 +54,6 @@ public class ColumnDefinition extends ColumnSpecification implements Selectable, + * those parts of the clustering columns and amongst the others, regular and + * static ones. + * +- * Note that thrift only knows about definitions of type REGULAR (and +- * the ones whose position == NO_POSITION (-1)). + */ + public enum Kind + { +@@ -308,23 +306,6 @@ public class ColumnDefinition extends ColumnSpecification implements Selectable, + } + + /** +- * Whether the name of this definition is serialized in the cell nane, i.e. whether +- * it's not just a non-stored CQL metadata. +- */ +- public boolean isPartOfCellName(boolean isCQL3Table, boolean isSuper) +- { +- // When converting CQL3 tables to thrift, any regular or static column ends up in the cell name. +- // When it's a compact table however, the REGULAR definition is the name for the cell value of "dynamic" +- // column (so it's not part of the cell name) and it's static columns that ends up in the cell name. +- if (isCQL3Table) +- return kind == Kind.REGULAR || kind == Kind.STATIC; +- else if (isSuper) +- return kind == Kind.REGULAR; +- else +- return kind == Kind.STATIC; +- } +- +- /** + * Converts the specified column definitions into column identifiers. + * + * @param definitions the column definitions to convert. 
+@@ -442,7 +423,7 @@ public class ColumnDefinition extends ColumnSpecification implements Selectable, + } + + /** +- * Because Thrift-created tables may have a non-text comparator, we cannot determine the proper 'key' until ++ * Because legacy-created tables may have a non-text comparator, we cannot determine the proper 'key' until + * we know the comparator. ColumnDefinition.Raw is a placeholder that can be converted to a real ColumnIdentifier + * once the comparator is known with prepare(). This should only be used with identifiers that are actual + * column names. See CASSANDRA-8178 for more background. +@@ -522,19 +503,19 @@ public class ColumnDefinition extends ColumnSpecification implements Selectable, + if (!cfm.isStaticCompactTable()) + return ColumnIdentifier.getInterned(text, true); + +- AbstractType thriftColumnNameType = cfm.thriftColumnNameType(); +- if (thriftColumnNameType instanceof UTF8Type) ++ AbstractType columnNameType = cfm.staticCompactOrSuperTableColumnNameType(); ++ if (columnNameType instanceof UTF8Type) + return ColumnIdentifier.getInterned(text, true); + +- // We have a Thrift-created table with a non-text comparator. Check if we have a match column, otherwise assume we should use +- // thriftColumnNameType ++ // We have a legacy-created table with a non-text comparator. 
Check if we have a matching column, otherwise assume we should use ++ // columnNameType + ByteBuffer bufferName = ByteBufferUtil.bytes(text); + for (ColumnDefinition def : cfm.allColumns()) + { + if (def.name.bytes.equals(bufferName)) + return def.name; + } +- return ColumnIdentifier.getInterned(thriftColumnNameType.fromString(text), text); ++ return ColumnIdentifier.getInterned(columnNameType.fromString(text), text); + } + + public ColumnDefinition prepare(CFMetaData cfm) +@@ -542,19 +523,19 @@ public class ColumnDefinition extends ColumnSpecification implements Selectable, + if (!cfm.isStaticCompactTable()) + return find(cfm); + +- AbstractType thriftColumnNameType = cfm.thriftColumnNameType(); +- if (thriftColumnNameType instanceof UTF8Type) ++ AbstractType columnNameType = cfm.staticCompactOrSuperTableColumnNameType(); ++ if (columnNameType instanceof UTF8Type) + return find(cfm); + +- // We have a Thrift-created table with a non-text comparator. Check if we have a match column, otherwise assume we should use +- // thriftColumnNameType ++ // We have a legacy-created table with a non-text comparator. 
Check if we have a match column, otherwise assume we should use ++ // columnNameType + ByteBuffer bufferName = ByteBufferUtil.bytes(text); + for (ColumnDefinition def : cfm.allColumns()) + { + if (def.name.bytes.equals(bufferName)) + return def; + } +- return find(thriftColumnNameType.fromString(text), cfm); ++ return find(columnNameType.fromString(text), cfm); + } + + private ColumnDefinition find(CFMetaData cfm) +diff --git a/src/java/org/apache/cassandra/config/Config.java b/src/java/org/apache/cassandra/config/Config.java +index e6ff98c..46e79b9 100644 +--- a/src/java/org/apache/cassandra/config/Config.java ++++ b/src/java/org/apache/cassandra/config/Config.java +@@ -127,21 +127,11 @@ public class Config + public Boolean listen_on_broadcast_address = false; + public String internode_authenticator; + +- /* intentionally left set to true, despite being set to false in stock 2.2 cassandra.yaml +- we don't want to surprise Thrift users who have the setting blank in the yaml during 2.1->2.2 upgrade */ +- public Boolean start_rpc = true; + public String rpc_address; + public String rpc_interface; + public Boolean rpc_interface_prefer_ipv6 = false; + public String broadcast_rpc_address; +- public Integer rpc_port = 9160; +- public Integer rpc_listen_backlog = 50; +- public String rpc_server_type = "sync"; + public Boolean rpc_keepalive = true; +- public Integer rpc_min_threads = 16; +- public Integer rpc_max_threads = Integer.MAX_VALUE; +- public Integer rpc_send_buff_size_in_bytes; +- public Integer rpc_recv_buff_size_in_bytes; + public Integer internode_send_buff_size_in_bytes; + public Integer internode_recv_buff_size_in_bytes; + +@@ -153,8 +143,6 @@ public class Config + public volatile Long native_transport_max_concurrent_connections = -1L; + public volatile Long native_transport_max_concurrent_connections_per_ip = -1L; + +- @Deprecated +- public Integer thrift_max_message_length_in_mb = 16; + /** + * Max size of values in SSTables, in MegaBytes. 
+ * Default is the same as the native protocol frame limit: 256Mb. +@@ -162,7 +150,6 @@ public class Config + */ + public Integer max_value_size_in_mb = 256; + +- public Integer thrift_framed_transport_size_in_mb = 15; + public Boolean snapshot_before_compaction = false; + public Boolean auto_snapshot = true; + +@@ -218,10 +205,6 @@ public class Config + public Integer dynamic_snitch_reset_interval_in_ms = 600000; + public Double dynamic_snitch_badness_threshold = 0.1; + +- public String request_scheduler; +- public RequestSchedulerId request_scheduler_id; +- public RequestSchedulerOptions request_scheduler_options; +- + public ServerEncryptionOptions server_encryption_options = new ServerEncryptionOptions(); + public ClientEncryptionOptions client_encryption_options = new ClientEncryptionOptions(); + // this encOptions is for backward compatibility (a warning is logged by DatabaseDescriptor) +@@ -311,11 +294,6 @@ public class Config + * Defaults to 1/256th of the heap size or 10MB, whichever is greater. + */ + public Long prepared_statements_cache_size_mb = null; +- /** +- * Size of the Thrift prepared statements cache in MB. +- * Defaults to 1/256th of the heap size or 10MB, whichever is greater. 
+- */ +- public Long thrift_prepared_statements_cache_size_mb = null; + + public boolean enable_user_defined_functions = false; + public boolean enable_scripted_user_defined_functions = false; +@@ -420,11 +398,6 @@ public class Config + die_immediate + } + +- public enum RequestSchedulerId +- { +- keyspace +- } +- + public enum DiskOptimizationStrategy + { + ssd, +diff --git a/src/java/org/apache/cassandra/config/DatabaseDescriptor.java b/src/java/org/apache/cassandra/config/DatabaseDescriptor.java +index 6e08599..6431086 100644 +--- a/src/java/org/apache/cassandra/config/DatabaseDescriptor.java ++++ b/src/java/org/apache/cassandra/config/DatabaseDescriptor.java +@@ -37,7 +37,6 @@ import org.slf4j.LoggerFactory; + + import org.apache.cassandra.auth.*; + import org.apache.cassandra.config.Config.CommitLogSync; +-import org.apache.cassandra.config.Config.RequestSchedulerId; + import org.apache.cassandra.config.EncryptionOptions.ClientEncryptionOptions; + import org.apache.cassandra.config.EncryptionOptions.ServerEncryptionOptions; + import org.apache.cassandra.db.ColumnFamilyStore; +@@ -49,11 +48,8 @@ import org.apache.cassandra.io.sstable.format.SSTableFormat; + import org.apache.cassandra.io.util.FileUtils; + import org.apache.cassandra.locator.*; + import org.apache.cassandra.net.MessagingService; +-import org.apache.cassandra.scheduler.IRequestScheduler; +-import org.apache.cassandra.scheduler.NoScheduler; + import org.apache.cassandra.security.EncryptionContext; + import org.apache.cassandra.service.CacheService; +-import org.apache.cassandra.thrift.ThriftServer; + import org.apache.cassandra.utils.FBUtilities; + import org.apache.cassandra.utils.memory.*; + import org.apache.commons.lang3.StringUtils; +@@ -92,12 +88,7 @@ public class DatabaseDescriptor + // depend on the configured IAuthenticator, so defer creating it until that's been set. 
+ private static IRoleManager roleManager; + +- private static IRequestScheduler requestScheduler; +- private static RequestSchedulerId requestSchedulerId; +- private static RequestSchedulerOptions requestSchedulerOptions; +- + private static long preparedStatementsCacheSizeInMB; +- private static long thriftPreparedStatementsCacheSizeInMB; + + private static long keyCacheSizeInMB; + private static long counterCacheSizeInMB; +@@ -442,21 +433,9 @@ public class DatabaseDescriptor + + applyAddressConfig(config); + +- if (conf.thrift_framed_transport_size_in_mb <= 0) +- throw new ConfigurationException("thrift_framed_transport_size_in_mb must be positive, but was " + conf.thrift_framed_transport_size_in_mb, false); +- + if (conf.native_transport_max_frame_size_in_mb <= 0) + throw new ConfigurationException("native_transport_max_frame_size_in_mb must be positive, but was " + conf.native_transport_max_frame_size_in_mb, false); + +- // fail early instead of OOMing (see CASSANDRA-8116) +- if (ThriftServer.HSHA.equals(conf.rpc_server_type) && conf.rpc_max_threads == Integer.MAX_VALUE) +- throw new ConfigurationException("The hsha rpc_server_type is not compatible with an rpc_max_threads " + +- "setting of 'unlimited'. 
Please see the comments in cassandra.yaml " + +- "for rpc_server_type and rpc_max_threads.", +- false); +- if (ThriftServer.HSHA.equals(conf.rpc_server_type) && conf.rpc_max_threads > (FBUtilities.getAvailableProcessors() * 2 + 1024)) +- logger.warn("rpc_max_threads setting of {} may be too high for the hsha server and cause unnecessary thread contention, reducing performance", conf.rpc_max_threads); +- + /* end point snitch */ + if (conf.endpoint_snitch == null) + { +@@ -480,43 +459,6 @@ public class DatabaseDescriptor + } + }; + +- /* Request Scheduler setup */ +- requestSchedulerOptions = conf.request_scheduler_options; +- if (conf.request_scheduler != null) +- { +- try +- { +- if (requestSchedulerOptions == null) +- { +- requestSchedulerOptions = new RequestSchedulerOptions(); +- } +- Class cls = Class.forName(conf.request_scheduler); +- requestScheduler = (IRequestScheduler) cls.getConstructor(RequestSchedulerOptions.class).newInstance(requestSchedulerOptions); +- } +- catch (ClassNotFoundException e) +- { +- throw new ConfigurationException("Invalid Request Scheduler class " + conf.request_scheduler, false); +- } +- catch (Exception e) +- { +- throw new ConfigurationException("Unable to instantiate request scheduler", e); +- } +- } +- else +- { +- requestScheduler = new NoScheduler(); +- } +- +- if (conf.request_scheduler_id == RequestSchedulerId.keyspace) +- { +- requestSchedulerId = conf.request_scheduler_id; +- } +- else +- { +- // Default to Keyspace +- requestSchedulerId = RequestSchedulerId.keyspace; +- } +- + // if data dirs, commitlog dir, or saved caches dir are set in cassandra.yaml, use that. 
Otherwise, + // use -Dcassandra.storagedir (set in cassandra-env.sh) as the parent dir for data/, commitlog/, and saved_caches/ + if (conf.commitlog_directory == null) +@@ -704,22 +646,6 @@ public class DatabaseDescriptor + + try + { +- // if thrift_prepared_statements_cache_size_mb option was set to "auto" then size of the cache should be "max(1/256 of Heap (in MB), 10MB)" +- thriftPreparedStatementsCacheSizeInMB = (conf.thrift_prepared_statements_cache_size_mb == null) +- ? Math.max(10, (int) (Runtime.getRuntime().maxMemory() / 1024 / 1024 / 256)) +- : conf.thrift_prepared_statements_cache_size_mb; +- +- if (thriftPreparedStatementsCacheSizeInMB <= 0) +- throw new NumberFormatException(); // to escape duplicating error message +- } +- catch (NumberFormatException e) +- { +- throw new ConfigurationException("thrift_prepared_statements_cache_size_mb option was set incorrectly to '" +- + conf.thrift_prepared_statements_cache_size_mb + "', supported values are >= 0.", false); +- } +- +- try +- { + // if key_cache_size_in_mb option was set to "auto" then size of the cache should be "min(5% of Heap (in MB), 100MB) + keyCacheSizeInMB = (conf.key_cache_size_in_mb == null) + ? 
Math.min(Math.max(1, (int) (Runtime.getRuntime().totalMemory() * 0.05 / 1024 / 1024)), 100) +@@ -951,11 +877,6 @@ public class DatabaseDescriptor + return conf.credentials_cache_max_entries = maxEntries; + } + +- public static int getThriftFramedTransportSize() +- { +- return conf.thrift_framed_transport_size_in_mb * 1024 * 1024; +- } +- + public static int getMaxValueSize() + { + return conf.max_value_size_in_mb * 1024 * 1024; +@@ -1035,21 +956,6 @@ public class DatabaseDescriptor + snitch = eps; + } + +- public static IRequestScheduler getRequestScheduler() +- { +- return requestScheduler; +- } +- +- public static RequestSchedulerOptions getRequestSchedulerOptions() +- { +- return requestSchedulerOptions; +- } +- +- public static RequestSchedulerId getRequestSchedulerId() +- { +- return requestSchedulerId; +- } +- + public static int getColumnIndexSize() + { + return conf.column_index_size_in_kb * 1024; +@@ -1183,16 +1089,6 @@ public class DatabaseDescriptor + return Integer.parseInt(System.getProperty("cassandra.ssl_storage_port", conf.ssl_storage_port.toString())); + } + +- public static int getRpcPort() +- { +- return Integer.parseInt(System.getProperty("cassandra.rpc_port", conf.rpc_port.toString())); +- } +- +- public static int getRpcListenBacklog() +- { +- return conf.rpc_listen_backlog; +- } +- + public static long getRpcTimeout() + { + return conf.request_timeout_in_ms; +@@ -1500,11 +1396,6 @@ public class DatabaseDescriptor + broadcastAddress = broadcastAdd; + } + +- public static boolean startRpc() +- { +- return conf.start_rpc; +- } +- + public static InetAddress getRpcAddress() + { + return rpcAddress; +@@ -1523,36 +1414,11 @@ public class DatabaseDescriptor + return broadcastRpcAddress; + } + +- public static String getRpcServerType() +- { +- return conf.rpc_server_type; +- } +- + public static boolean getRpcKeepAlive() + { + return conf.rpc_keepalive; + } + +- public static Integer getRpcMinThreads() +- { +- return conf.rpc_min_threads; +- } +- +- 
public static Integer getRpcMaxThreads() +- { +- return conf.rpc_max_threads; +- } +- +- public static Integer getRpcSendBufferSize() +- { +- return conf.rpc_send_buff_size_in_bytes; +- } +- +- public static Integer getRpcRecvBufferSize() +- { +- return conf.rpc_recv_buff_size_in_bytes; +- } +- + public static Integer getInternodeSendBufferSize() + { + return conf.internode_send_buff_size_in_bytes; +@@ -2122,11 +1988,6 @@ public class DatabaseDescriptor + return preparedStatementsCacheSizeInMB; + } + +- public static long getThriftPreparedStatementsCacheSizeMB() +- { +- return thriftPreparedStatementsCacheSizeInMB; +- } +- + public static boolean enableUserDefinedFunctions() + { + return conf.enable_user_defined_functions; +diff --git a/src/java/org/apache/cassandra/config/RequestSchedulerOptions.java b/src/java/org/apache/cassandra/config/RequestSchedulerOptions.java +deleted file mode 100644 +index dacf405..0000000 +--- a/src/java/org/apache/cassandra/config/RequestSchedulerOptions.java ++++ /dev/null +@@ -1,33 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. 
+- */ +-package org.apache.cassandra.config; +- +-import java.util.Map; +- +-/** +- * +- */ +-public class RequestSchedulerOptions +-{ +- public static final Integer DEFAULT_THROTTLE_LIMIT = 80; +- public static final Integer DEFAULT_WEIGHT = 1; +- +- public Integer throttle_limit = DEFAULT_THROTTLE_LIMIT; +- public Integer default_weight = DEFAULT_WEIGHT; +- public Map weights; +-} +diff --git a/src/java/org/apache/cassandra/cql3/CQL3Type.java b/src/java/org/apache/cassandra/cql3/CQL3Type.java +index cf7e18a..d8b95cf 100644 +--- a/src/java/org/apache/cassandra/cql3/CQL3Type.java ++++ b/src/java/org/apache/cassandra/cql3/CQL3Type.java +@@ -651,7 +651,7 @@ public interface CQL3Type + if (!frozen && values.supportsFreezing() && !values.frozen) + throwNestedNonFrozenError(values); + +- // we represent Thrift supercolumns as maps, internally, and we do allow counters in supercolumns. Thus, ++ // we represent supercolumns as maps, internally, and we do allow counters in supercolumns. Thus, + // for internal type parsing (think schema) we have to make an exception and allow counters as (map) values + if (values.isCounter() && !isInternal) + throw new InvalidRequestException("Counters are not allowed inside collections: " + this); +diff --git a/src/java/org/apache/cassandra/cql3/CustomPayloadMirroringQueryHandler.java b/src/java/org/apache/cassandra/cql3/CustomPayloadMirroringQueryHandler.java +index 02a6df9..80c125b 100644 +--- a/src/java/org/apache/cassandra/cql3/CustomPayloadMirroringQueryHandler.java ++++ b/src/java/org/apache/cassandra/cql3/CustomPayloadMirroringQueryHandler.java +@@ -57,11 +57,6 @@ public class CustomPayloadMirroringQueryHandler implements QueryHandler + return queryProcessor.getPrepared(id); + } + +- public ParsedStatement.Prepared getPreparedForThrift(Integer id) +- { +- return queryProcessor.getPreparedForThrift(id); +- } +- + public ResultMessage processPrepared(CQLStatement statement, + QueryState state, + QueryOptions options, +diff --git 
a/src/java/org/apache/cassandra/cql3/QueryHandler.java b/src/java/org/apache/cassandra/cql3/QueryHandler.java +index 3c11c0e..a2ce406 100644 +--- a/src/java/org/apache/cassandra/cql3/QueryHandler.java ++++ b/src/java/org/apache/cassandra/cql3/QueryHandler.java +@@ -41,8 +41,6 @@ public interface QueryHandler + + ParsedStatement.Prepared getPrepared(MD5Digest id); + +- ParsedStatement.Prepared getPreparedForThrift(Integer id); +- + ResultMessage processPrepared(CQLStatement statement, + QueryState state, + QueryOptions options, +diff --git a/src/java/org/apache/cassandra/cql3/QueryOptions.java b/src/java/org/apache/cassandra/cql3/QueryOptions.java +index e6cda89..f26d7dc 100644 +--- a/src/java/org/apache/cassandra/cql3/QueryOptions.java ++++ b/src/java/org/apache/cassandra/cql3/QueryOptions.java +@@ -51,11 +51,6 @@ public abstract class QueryOptions + // A cache of bind values parsed as JSON, see getJsonColumnValue for details. + private List> jsonValuesCache; + +- public static QueryOptions fromThrift(ConsistencyLevel consistency, List values) +- { +- return new DefaultQueryOptions(consistency, values, false, SpecificOptions.DEFAULT, Server.VERSION_3); +- } +- + public static QueryOptions forInternalCalls(ConsistencyLevel consistency, List values) + { + return new DefaultQueryOptions(consistency, values, false, SpecificOptions.DEFAULT, Server.VERSION_3); +@@ -177,8 +172,7 @@ public abstract class QueryOptions + } + + /** +- * The protocol version for the query. Will be 3 if the object don't come from +- * a native protocol request (i.e. it's been allocated locally or by CQL-over-thrift). ++ * The protocol version for the query. 
+ */ + public abstract int getProtocolVersion(); + +diff --git a/src/java/org/apache/cassandra/cql3/QueryProcessor.java b/src/java/org/apache/cassandra/cql3/QueryProcessor.java +index 222204b..ac4d977 100644 +--- a/src/java/org/apache/cassandra/cql3/QueryProcessor.java ++++ b/src/java/org/apache/cassandra/cql3/QueryProcessor.java +@@ -50,12 +50,13 @@ import org.apache.cassandra.exceptions.*; + import org.apache.cassandra.metrics.CQLMetrics; + import org.apache.cassandra.service.*; + import org.apache.cassandra.service.pager.QueryPager; +-import org.apache.cassandra.thrift.ThriftClientState; + import org.apache.cassandra.tracing.Tracing; + import org.apache.cassandra.transport.Server; + import org.apache.cassandra.transport.messages.ResultMessage; + import org.apache.cassandra.utils.*; + ++import static org.apache.cassandra.cql3.statements.RequestValidations.checkTrue; ++ + public class QueryProcessor implements QueryHandler + { + public static final CassandraVersion CQL_VERSION = new CassandraVersion("3.4.2"); +@@ -65,7 +66,6 @@ public class QueryProcessor implements QueryHandler + private static final Logger logger = LoggerFactory.getLogger(QueryProcessor.class); + + private static final ConcurrentLinkedHashMap preparedStatements; +- private static final ConcurrentLinkedHashMap thriftPreparedStatements; + + // A map for prepared statements used internally (which we don't want to mix with user statement, in particular we don't + // bother with expiration on those. 
+@@ -76,7 +76,6 @@ public class QueryProcessor implements QueryHandler + public static final CQLMetrics metrics = new CQLMetrics(); + + private static final AtomicInteger lastMinuteEvictionsCount = new AtomicInteger(0); +- private static final AtomicInteger thriftLastMinuteEvictionsCount = new AtomicInteger(0); + + static + { +@@ -88,31 +87,16 @@ public class QueryProcessor implements QueryHandler + lastMinuteEvictionsCount.incrementAndGet(); + }).build(); + +- thriftPreparedStatements = new ConcurrentLinkedHashMap.Builder() +- .maximumWeightedCapacity(capacityToBytes(DatabaseDescriptor.getThriftPreparedStatementsCacheSizeMB())) +- .weigher(QueryProcessor::measure) +- .listener((integer, prepared) -> { +- metrics.preparedStatementsEvicted.inc(); +- thriftLastMinuteEvictionsCount.incrementAndGet(); +- }) +- .build(); +- + ScheduledExecutors.scheduledTasks.scheduleAtFixedRate(() -> { + long count = lastMinuteEvictionsCount.getAndSet(0); + if (count > 0) + logger.warn("{} prepared statements discarded in the last minute because cache limit reached ({} MB)", + count, + DatabaseDescriptor.getPreparedStatementsCacheSizeMB()); +- count = thriftLastMinuteEvictionsCount.getAndSet(0); +- if (count > 0) +- logger.warn("{} prepared Thrift statements discarded in the last minute because cache limit reached ({} MB)", +- count, +- DatabaseDescriptor.getThriftPreparedStatementsCacheSizeMB()); + }, 1, 1, TimeUnit.MINUTES); + +- logger.info("Initialized prepared statement caches with {} MB (native) and {} MB (Thrift)", +- DatabaseDescriptor.getPreparedStatementsCacheSizeMB(), +- DatabaseDescriptor.getThriftPreparedStatementsCacheSizeMB()); ++ logger.info("Initialized prepared statement caches with {} MB", ++ DatabaseDescriptor.getPreparedStatementsCacheSizeMB()); + } + + private static long capacityToBytes(long cacheSizeMB) +@@ -122,7 +106,7 @@ public class QueryProcessor implements QueryHandler + + public static int preparedStatementsCount() + { +- return preparedStatements.size() 
+ thriftPreparedStatements.size(); ++ return preparedStatements.size(); + } + + // Work around initialization dependency +@@ -155,11 +139,6 @@ public class QueryProcessor implements QueryHandler + return preparedStatements.get(id); + } + +- public ParsedStatement.Prepared getPreparedForThrift(Integer id) +- { +- return thriftPreparedStatements.get(id); +- } +- + public static void validateKey(ByteBuffer key) throws InvalidRequestException + { + if (key == null || key.remaining() == 0) +@@ -371,12 +350,12 @@ public class QueryProcessor implements QueryHandler + public ResultMessage.Prepared prepare(String queryString, QueryState queryState) + { + ClientState cState = queryState.getClientState(); +- return prepare(queryString, cState, cState instanceof ThriftClientState); ++ return prepare(queryString, cState); + } + +- public static ResultMessage.Prepared prepare(String queryString, ClientState clientState, boolean forThrift) ++ public static ResultMessage.Prepared prepare(String queryString, ClientState clientState) + { +- ResultMessage.Prepared existing = getStoredPreparedStatement(queryString, clientState.getRawKeyspace(), forThrift); ++ ResultMessage.Prepared existing = getStoredPreparedStatement(queryString, clientState.getRawKeyspace()); + if (existing != null) + return existing; + +@@ -387,7 +366,7 @@ public class QueryProcessor implements QueryHandler + throw new InvalidRequestException(String.format("Too many markers(?). 
%d markers exceed the allowed maximum of %d", boundTerms, FBUtilities.MAX_UNSIGNED_SHORT)); + assert boundTerms == prepared.boundNames.size(); + +- return storePreparedStatement(queryString, clientState.getRawKeyspace(), prepared, forThrift); ++ return storePreparedStatement(queryString, clientState.getRawKeyspace(), prepared); + } + + private static MD5Digest computeId(String queryString, String keyspace) +@@ -396,58 +375,33 @@ public class QueryProcessor implements QueryHandler + return MD5Digest.compute(toHash); + } + +- private static Integer computeThriftId(String queryString, String keyspace) +- { +- String toHash = keyspace == null ? queryString : keyspace + queryString; +- return toHash.hashCode(); +- } +- +- private static ResultMessage.Prepared getStoredPreparedStatement(String queryString, String keyspace, boolean forThrift) ++ private static ResultMessage.Prepared getStoredPreparedStatement(String queryString, String keyspace) + throws InvalidRequestException + { +- if (forThrift) +- { +- Integer thriftStatementId = computeThriftId(queryString, keyspace); +- ParsedStatement.Prepared existing = thriftPreparedStatements.get(thriftStatementId); +- return existing == null ? null : ResultMessage.Prepared.forThrift(thriftStatementId, existing.boundNames); +- } +- else +- { +- MD5Digest statementId = computeId(queryString, keyspace); +- ParsedStatement.Prepared existing = preparedStatements.get(statementId); +- return existing == null ? null : new ResultMessage.Prepared(statementId, existing); +- } ++ MD5Digest statementId = computeId(queryString, keyspace); ++ ParsedStatement.Prepared existing = preparedStatements.get(statementId); ++ if (existing == null) ++ return null; ++ checkTrue(queryString.equals(existing.rawCQLStatement), ++ String.format("MD5 hash collision: query with the same MD5 hash was already prepared. 
\n Existing: '%s'", existing.rawCQLStatement)); ++ return new ResultMessage.Prepared(statementId, existing); + } + +- private static ResultMessage.Prepared storePreparedStatement(String queryString, String keyspace, ParsedStatement.Prepared prepared, boolean forThrift) ++ private static ResultMessage.Prepared storePreparedStatement(String queryString, String keyspace, ParsedStatement.Prepared prepared) + throws InvalidRequestException + { + // Concatenate the current keyspace so we don't mix prepared statements between keyspace (#5352). + // (if the keyspace is null, queryString has to have a fully-qualified keyspace so it's fine. + long statementSize = ObjectSizes.measureDeep(prepared.statement); + // don't execute the statement if it's bigger than the allowed threshold +- if (forThrift) +- { +- if (statementSize > capacityToBytes(DatabaseDescriptor.getThriftPreparedStatementsCacheSizeMB())) +- throw new InvalidRequestException(String.format("Prepared statement of size %d bytes is larger than allowed maximum of %d MB: %s...", +- statementSize, +- DatabaseDescriptor.getThriftPreparedStatementsCacheSizeMB(), +- queryString.substring(0, 200))); +- Integer statementId = computeThriftId(queryString, keyspace); +- thriftPreparedStatements.put(statementId, prepared); +- return ResultMessage.Prepared.forThrift(statementId, prepared.boundNames); +- } +- else +- { +- if (statementSize > capacityToBytes(DatabaseDescriptor.getPreparedStatementsCacheSizeMB())) +- throw new InvalidRequestException(String.format("Prepared statement of size %d bytes is larger than allowed maximum of %d MB: %s...", +- statementSize, +- DatabaseDescriptor.getPreparedStatementsCacheSizeMB(), +- queryString.substring(0, 200))); +- MD5Digest statementId = computeId(queryString, keyspace); +- preparedStatements.put(statementId, prepared); +- return new ResultMessage.Prepared(statementId, prepared); +- } ++ if (statementSize > capacityToBytes(DatabaseDescriptor.getPreparedStatementsCacheSizeMB())) ++ 
throw new InvalidRequestException(String.format("Prepared statement of size %d bytes is larger than allowed maximum of %d MB: %s...", ++ statementSize, ++ DatabaseDescriptor.getPreparedStatementsCacheSizeMB(), ++ queryString.substring(0, 200))); ++ MD5Digest statementId = computeId(queryString, keyspace); ++ preparedStatements.put(statementId, prepared); ++ return new ResultMessage.Prepared(statementId, prepared); + } + + public ResultMessage processPrepared(CQLStatement statement, +@@ -559,7 +513,6 @@ public class QueryProcessor implements QueryHandler + { + removeInvalidPreparedStatements(internalStatements.values().iterator(), ksName, cfName); + removeInvalidPreparedStatements(preparedStatements.values().iterator(), ksName, cfName); +- removeInvalidPreparedStatements(thriftPreparedStatements.values().iterator(), ksName, cfName); + } + + private void removeInvalidPreparedStatements(Iterator iterator, String ksName, String cfName) +@@ -675,7 +628,6 @@ public class QueryProcessor implements QueryHandler + { + removeInvalidPreparedStatementsForFunction(internalStatements.values().iterator(), ksName, functionName); + removeInvalidPreparedStatementsForFunction(preparedStatements.values().iterator(), ksName, functionName); +- removeInvalidPreparedStatementsForFunction(thriftPreparedStatements.values().iterator(), ksName, functionName); + } + + private static void removeInvalidPreparedStatementsForFunction(Iterator statements, +diff --git a/src/java/org/apache/cassandra/cql3/ResultSet.java b/src/java/org/apache/cassandra/cql3/ResultSet.java +index 9010b20..b690b64 100644 +--- a/src/java/org/apache/cassandra/cql3/ResultSet.java ++++ b/src/java/org/apache/cassandra/cql3/ResultSet.java +@@ -25,11 +25,6 @@ import io.netty.buffer.ByteBuf; + import org.apache.cassandra.transport.*; + import org.apache.cassandra.db.marshal.AbstractType; + import org.apache.cassandra.db.marshal.ReversedType; +-import org.apache.cassandra.thrift.Column; +-import 
org.apache.cassandra.thrift.CqlMetadata; +-import org.apache.cassandra.thrift.CqlResult; +-import org.apache.cassandra.thrift.CqlResultType; +-import org.apache.cassandra.thrift.CqlRow; + import org.apache.cassandra.utils.ByteBufferUtil; + import org.apache.cassandra.service.pager.PagingState; + +@@ -95,44 +90,6 @@ public class ResultSet + } + } + +- public CqlResult toThriftResult() +- { +- assert metadata.names != null; +- +- String UTF8 = "UTF8Type"; +- CqlMetadata schema = new CqlMetadata(new HashMap(), +- new HashMap(), +- // The 2 following ones shouldn't be needed in CQL3 +- UTF8, UTF8); +- +- for (int i = 0; i < metadata.columnCount; i++) +- { +- ColumnSpecification spec = metadata.names.get(i); +- ByteBuffer colName = ByteBufferUtil.bytes(spec.name.toString()); +- schema.name_types.put(colName, UTF8); +- AbstractType normalizedType = spec.type instanceof ReversedType ? ((ReversedType)spec.type).baseType : spec.type; +- schema.value_types.put(colName, normalizedType.toString()); +- +- } +- +- List cqlRows = new ArrayList(rows.size()); +- for (List row : rows) +- { +- List thriftCols = new ArrayList(metadata.columnCount); +- for (int i = 0; i < metadata.columnCount; i++) +- { +- Column col = new Column(ByteBufferUtil.bytes(metadata.names.get(i).name.toString())); +- col.setValue(row.get(i)); +- thriftCols.add(col); +- } +- // The key of CqlRow shoudn't be needed in CQL3 +- cqlRows.add(new CqlRow(ByteBufferUtil.EMPTY_BYTE_BUFFER, thriftCols)); +- } +- CqlResult res = new CqlResult(CqlResultType.ROWS); +- res.setRows(cqlRows).setSchema(schema); +- return res; +- } +- + @Override + public String toString() + { +diff --git a/src/java/org/apache/cassandra/cql3/UpdateParameters.java b/src/java/org/apache/cassandra/cql3/UpdateParameters.java +index d2c01c8..1f07183 100644 +--- a/src/java/org/apache/cassandra/cql3/UpdateParameters.java ++++ b/src/java/org/apache/cassandra/cql3/UpdateParameters.java +@@ -84,8 +84,7 @@ public class UpdateParameters + { + if 
(metadata.isDense() && !metadata.isCompound())
+         {
+-            // If it's a COMPACT STORAGE table with a single clustering column, the clustering value is
+-            // translated in Thrift to the full Thrift column name, and for backward compatibility we
++            // If it's a COMPACT STORAGE table with a single clustering column and for backward compatibility we
+             // don't want to allow that to be empty (even though this would be fine for the storage engine).
+             assert clustering.size() == 1;
+             ByteBuffer value = clustering.get(0);
+@@ -122,9 +121,8 @@ public class UpdateParameters
+     public void addRowDeletion()
+     {
+         // For compact tables, at the exclusion of the static row (of static compact tables), each row ever has a single column,
+-        // the "compact" one. As such, deleting the row or deleting that single cell is equivalent. We favor the later however
+-        // because that makes it easier when translating back to the old format layout (for thrift and pre-3.0 backward
+-        // compatibility) as we don't have to special case for the row deletion. This is also in line with what we used to do pre-3.0.
++        // the "compact" one. As such, deleting the row or deleting that single cell is equivalent. We favor the latter
++        // for backward compatibility (though it doesn't truly matter anymore).
+         if (metadata.isCompactTable() && builder.clustering() != Clustering.STATIC_CLUSTERING)
+             addTombstone(metadata.compactValueColumn());
+         else
+diff --git a/src/java/org/apache/cassandra/cql3/Validation.java b/src/java/org/apache/cassandra/cql3/Validation.java
+new file mode 100644
+index 0000000..c7f0094
+--- /dev/null
++++ b/src/java/org/apache/cassandra/cql3/Validation.java
+@@ -0,0 +1,117 @@
++
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  
The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.cassandra.cql3;
++
++import java.nio.ByteBuffer;
++
++import org.apache.cassandra.config.CFMetaData;
++import org.apache.cassandra.config.Schema;
++import org.apache.cassandra.db.KeyspaceNotDefinedException;
++import org.apache.cassandra.serializers.MarshalException;
++import org.apache.cassandra.exceptions.InvalidRequestException;
++import org.apache.cassandra.utils.FBUtilities;
++
++/**
++ * A collection of static validation functions reused across statements.
++ *
++ * Note: this hosts functions that were historically in ThriftValidation, but
++ * it's not necessarily clear that this is the best place to have this (this is
++ * certainly not horrible either though).
++ */
++public abstract class Validation
++{
++    /**
++     * Retrieves the metadata for the provided keyspace and table name, throwing
++     * a meaningful user exception if those don't exist.
++     *
++     * @param keyspaceName the keyspace name.
++     * @param tableName the table name.
++     * @return the metadata for table {@code keyspaceName.tableName} if it
++     * exists (otherwise an {@code InvalidRequestException} is thrown).
++     *
++     * @throws InvalidRequestException if the table requested doesn't exist.
++ */ ++ public static CFMetaData validateColumnFamily(String keyspaceName, String tableName) ++ throws InvalidRequestException ++ { ++ validateKeyspace(keyspaceName); ++ if (tableName.isEmpty()) ++ throw new InvalidRequestException("non-empty table is required"); ++ ++ CFMetaData metadata = Schema.instance.getCFMetaData(keyspaceName, tableName); ++ if (metadata == null) ++ throw new InvalidRequestException("unconfigured table " + tableName); ++ ++ return metadata; ++ } ++ ++ private static void validateKeyspace(String keyspaceName) ++ throws KeyspaceNotDefinedException ++ { ++ if (!Schema.instance.getKeyspaces().contains(keyspaceName)) ++ throw new KeyspaceNotDefinedException("Keyspace " + keyspaceName + " does not exist"); ++ } ++ ++ /** ++ * Validates a (full serialized) partition key. ++ * ++ * @param metadata the metadata for the table of which to check the key. ++ * @param key the serialized partition key to check. ++ * ++ * @throws InvalidRequestException if the provided {@code key} is invalid. ++ */ ++ public static void validateKey(CFMetaData metadata, ByteBuffer key) ++ throws InvalidRequestException ++ { ++ if (key == null || key.remaining() == 0) ++ throw new InvalidRequestException("Key may not be empty"); ++ ++ // check that key can be handled by FBUtilities.writeShortByteArray ++ if (key.remaining() > FBUtilities.MAX_UNSIGNED_SHORT) ++ { ++ throw new InvalidRequestException("Key length of " + key.remaining() + ++ " is longer than maximum of " + ++ FBUtilities.MAX_UNSIGNED_SHORT); ++ } ++ ++ try ++ { ++ metadata.getKeyValidator().validate(key); ++ } ++ catch (MarshalException e) ++ { ++ throw new InvalidRequestException(e.getMessage()); ++ } ++ } ++ ++ /** ++ * Validates that the provided keyspace is not one of the system keyspace. ++ * ++ * @param keyspace the keyspace name to validate. ++ * ++ * @throws InvalidRequestException if {@code keyspace} is the name of a ++ * system keyspace. 
++ */ ++ public static void validateKeyspaceNotSystem(String keyspace) ++ throws InvalidRequestException ++ { ++ if (Schema.isSystemKeyspace(keyspace)) ++ throw new InvalidRequestException(String.format("%s keyspace is not user-modifiable", keyspace)); ++ } ++} +diff --git a/src/java/org/apache/cassandra/cql3/statements/AlterTableStatement.java b/src/java/org/apache/cassandra/cql3/statements/AlterTableStatement.java +index afe2776..daeaff1 100644 +--- a/src/java/org/apache/cassandra/cql3/statements/AlterTableStatement.java ++++ b/src/java/org/apache/cassandra/cql3/statements/AlterTableStatement.java +@@ -27,6 +27,7 @@ import org.apache.cassandra.config.*; + import org.apache.cassandra.cql3.CFName; + import org.apache.cassandra.cql3.CQL3Type; + import org.apache.cassandra.cql3.ColumnIdentifier; ++import org.apache.cassandra.cql3.Validation; + import org.apache.cassandra.db.ColumnFamilyStore; + import org.apache.cassandra.db.Keyspace; + import org.apache.cassandra.db.marshal.AbstractType; +@@ -41,8 +42,7 @@ import org.apache.cassandra.schema.TableParams; + import org.apache.cassandra.service.ClientState; + import org.apache.cassandra.service.MigrationManager; + import org.apache.cassandra.transport.Event; +- +-import static org.apache.cassandra.thrift.ThriftValidation.validateColumnFamily; ++import org.apache.cassandra.utils.FBUtilities; + + public class AlterTableStatement extends SchemaAlteringStatement + { +@@ -81,7 +81,7 @@ public class AlterTableStatement extends SchemaAlteringStatement + + public Event.SchemaChange announceMigration(boolean isLocalOnly) throws RequestValidationException + { +- CFMetaData meta = validateColumnFamily(keyspace(), columnFamily()); ++ CFMetaData meta = Validation.validateColumnFamily(keyspace(), columnFamily()); + if (meta.isView()) + throw new InvalidRequestException("Cannot use ALTER TABLE on Materialized View"); + +@@ -260,7 +260,7 @@ public class AlterTableStatement extends SchemaAlteringStatement + } + assert toDelete != null; 
+                cfm.removeColumnDefinition(toDelete);
+-                cfm.recordColumnDrop(toDelete);
++                cfm.recordColumnDrop(toDelete, FBUtilities.timestampMicros());
+                break;
+            }
+
+@@ -388,11 +388,10 @@ public class AlterTableStatement extends SchemaAlteringStatement
+                        break;
+                    case REGULAR:
+                    case STATIC:
+-                        // Thrift allows to change a column validator so CFMetaData.validateCompatibility will let it slide
+-                        // if we change to an incompatible type (contrarily to the comparator case). But we don't want to
+-                        // allow it for CQL3 (see #5882) so validating it explicitly here. We only care about value compatibility
+-                        // though since we won't compare values (except when there is an index, but that is validated by
+-                        // ColumnDefinition already).
++                        // As above, we want a clear error message, but in this case it happens that CFMetaData.validateCompatibility *does not*
++                        // validate this for historical reasons so it's doubly important. Note that we only care about value compatibility
++                        // though since we won't compare values (except when there is an index, but that is validated by ColumnDefinition already).
++                        // TODO: we could clear out where validation is done and do it only once.
+ if (!validatorType.isValueCompatibleWith(def.type)) + throw new ConfigurationException(String.format("Cannot change %s from type %s to type %s: types are incompatible.", + def.name, +diff --git a/src/java/org/apache/cassandra/cql3/statements/AlterViewStatement.java b/src/java/org/apache/cassandra/cql3/statements/AlterViewStatement.java +index 5b1699b..926180e 100644 +--- a/src/java/org/apache/cassandra/cql3/statements/AlterViewStatement.java ++++ b/src/java/org/apache/cassandra/cql3/statements/AlterViewStatement.java +@@ -22,6 +22,7 @@ import org.apache.cassandra.config.CFMetaData; + import org.apache.cassandra.config.Schema; + import org.apache.cassandra.config.ViewDefinition; + import org.apache.cassandra.cql3.CFName; ++import org.apache.cassandra.cql3.Validation; + import org.apache.cassandra.db.view.View; + import org.apache.cassandra.exceptions.InvalidRequestException; + import org.apache.cassandra.exceptions.RequestValidationException; +@@ -31,8 +32,6 @@ import org.apache.cassandra.service.ClientState; + import org.apache.cassandra.service.MigrationManager; + import org.apache.cassandra.transport.Event; + +-import static org.apache.cassandra.thrift.ThriftValidation.validateColumnFamily; +- + public class AlterViewStatement extends SchemaAlteringStatement + { + private final TableAttributes attrs; +@@ -57,7 +56,7 @@ public class AlterViewStatement extends SchemaAlteringStatement + + public Event.SchemaChange announceMigration(boolean isLocalOnly) throws RequestValidationException + { +- CFMetaData meta = validateColumnFamily(keyspace(), columnFamily()); ++ CFMetaData meta = Validation.validateColumnFamily(keyspace(), columnFamily()); + if (!meta.isView()) + throw new InvalidRequestException("Cannot use ALTER MATERIALIZED VIEW on Table"); + +diff --git a/src/java/org/apache/cassandra/cql3/statements/BatchStatement.java b/src/java/org/apache/cassandra/cql3/statements/BatchStatement.java +index 2739c2e..3adce47 100644 +--- 
a/src/java/org/apache/cassandra/cql3/statements/BatchStatement.java ++++ b/src/java/org/apache/cassandra/cql3/statements/BatchStatement.java +@@ -79,11 +79,10 @@ public class BatchStatement implements CQLStatement + "entries to expire before being replayed."; + + /** +- * Creates a new BatchStatement from a list of statements and a +- * Thrift consistency level. ++ * Creates a new BatchStatement. + * + * @param type type of the batch +- * @param statements a list of UpdateStatements ++ * @param statements the list of statements in the batch + * @param attrs additional attributes for statement (CL, timestamp, timeToLive) + */ + public BatchStatement(int boundTerms, Type type, List statements, Attributes attrs) +diff --git a/src/java/org/apache/cassandra/cql3/statements/CreateAggregateStatement.java b/src/java/org/apache/cassandra/cql3/statements/CreateAggregateStatement.java +index 98e2433..68b72f4 100644 +--- a/src/java/org/apache/cassandra/cql3/statements/CreateAggregateStatement.java ++++ b/src/java/org/apache/cassandra/cql3/statements/CreateAggregateStatement.java +@@ -34,7 +34,6 @@ import org.apache.cassandra.serializers.MarshalException; + import org.apache.cassandra.service.ClientState; + import org.apache.cassandra.service.MigrationManager; + import org.apache.cassandra.service.QueryState; +-import org.apache.cassandra.thrift.ThriftValidation; + import org.apache.cassandra.transport.Event; + import org.apache.cassandra.transport.Server; + +@@ -161,7 +160,7 @@ public final class CreateAggregateStatement extends SchemaAlteringStatement + if (!functionName.hasKeyspace()) + throw new InvalidRequestException("Functions must be fully qualified with a keyspace name if a keyspace is not set for the session"); + +- ThriftValidation.validateKeyspaceNotSystem(functionName.keyspace); ++ Validation.validateKeyspaceNotSystem(functionName.keyspace); + + stateFunc = new FunctionName(functionName.keyspace, stateFunc.name); + if (finalFunc != null) +diff --git 
a/src/java/org/apache/cassandra/cql3/statements/CreateFunctionStatement.java b/src/java/org/apache/cassandra/cql3/statements/CreateFunctionStatement.java +index a54c49e..e0a1e6b 100644 +--- a/src/java/org/apache/cassandra/cql3/statements/CreateFunctionStatement.java ++++ b/src/java/org/apache/cassandra/cql3/statements/CreateFunctionStatement.java +@@ -26,6 +26,7 @@ import org.apache.cassandra.config.DatabaseDescriptor; + import org.apache.cassandra.config.Schema; + import org.apache.cassandra.cql3.CQL3Type; + import org.apache.cassandra.cql3.ColumnIdentifier; ++import org.apache.cassandra.cql3.Validation; + import org.apache.cassandra.cql3.functions.*; + import org.apache.cassandra.db.marshal.AbstractType; + import org.apache.cassandra.exceptions.*; +@@ -33,7 +34,6 @@ import org.apache.cassandra.schema.Functions; + import org.apache.cassandra.service.ClientState; + import org.apache.cassandra.service.MigrationManager; + import org.apache.cassandra.service.QueryState; +-import org.apache.cassandra.thrift.ThriftValidation; + import org.apache.cassandra.transport.Event; + + /** +@@ -98,7 +98,7 @@ public final class CreateFunctionStatement extends SchemaAlteringStatement + if (!functionName.hasKeyspace()) + throw new InvalidRequestException("Functions must be fully qualified with a keyspace name if a keyspace is not set for the session"); + +- ThriftValidation.validateKeyspaceNotSystem(functionName.keyspace); ++ Validation.validateKeyspaceNotSystem(functionName.keyspace); + } + + protected void grantPermissionsToCreator(QueryState state) +diff --git a/src/java/org/apache/cassandra/cql3/statements/CreateIndexStatement.java b/src/java/org/apache/cassandra/cql3/statements/CreateIndexStatement.java +index f899247..d7ad632 100644 +--- a/src/java/org/apache/cassandra/cql3/statements/CreateIndexStatement.java ++++ b/src/java/org/apache/cassandra/cql3/statements/CreateIndexStatement.java +@@ -32,6 +32,7 @@ import org.apache.cassandra.config.Schema; + import 
org.apache.cassandra.cql3.CFName; + import org.apache.cassandra.cql3.ColumnIdentifier; + import org.apache.cassandra.cql3.IndexName; ++import org.apache.cassandra.cql3.Validation; + import org.apache.cassandra.db.marshal.MapType; + import org.apache.cassandra.exceptions.InvalidRequestException; + import org.apache.cassandra.exceptions.RequestValidationException; +@@ -40,7 +41,6 @@ import org.apache.cassandra.schema.IndexMetadata; + import org.apache.cassandra.schema.Indexes; + import org.apache.cassandra.service.ClientState; + import org.apache.cassandra.service.MigrationManager; +-import org.apache.cassandra.thrift.ThriftValidation; + import org.apache.cassandra.transport.Event; + + /** A CREATE INDEX statement parsed from a CQL query. */ +@@ -73,7 +73,7 @@ public class CreateIndexStatement extends SchemaAlteringStatement + + public void validate(ClientState state) throws RequestValidationException + { +- CFMetaData cfm = ThriftValidation.validateColumnFamily(keyspace(), columnFamily()); ++ CFMetaData cfm = Validation.validateColumnFamily(keyspace(), columnFamily()); + + if (cfm.isCounter()) + throw new InvalidRequestException("Secondary indexes are not supported on counter tables"); +diff --git a/src/java/org/apache/cassandra/cql3/statements/CreateKeyspaceStatement.java b/src/java/org/apache/cassandra/cql3/statements/CreateKeyspaceStatement.java +index f88c04f..9b8a30f 100644 +--- a/src/java/org/apache/cassandra/cql3/statements/CreateKeyspaceStatement.java ++++ b/src/java/org/apache/cassandra/cql3/statements/CreateKeyspaceStatement.java +@@ -20,6 +20,7 @@ package org.apache.cassandra.cql3.statements; + import java.util.regex.Pattern; + + import org.apache.cassandra.auth.*; ++import org.apache.cassandra.cql3.Validation; + import org.apache.cassandra.config.DatabaseDescriptor; + import org.apache.cassandra.config.Schema; + import org.apache.cassandra.exceptions.*; +@@ -27,7 +28,6 @@ import org.apache.cassandra.locator.LocalStrategy; + import 
org.apache.cassandra.schema.KeyspaceMetadata; + import org.apache.cassandra.schema.KeyspaceParams; + import org.apache.cassandra.service.*; +-import org.apache.cassandra.thrift.ThriftValidation; + import org.apache.cassandra.transport.Event; + + /** A CREATE KEYSPACE statement parsed from a CQL query. */ +@@ -74,7 +74,7 @@ public class CreateKeyspaceStatement extends SchemaAlteringStatement + */ + public void validate(ClientState state) throws RequestValidationException + { +- ThriftValidation.validateKeyspaceNotSystem(name); ++ Validation.validateKeyspaceNotSystem(name); + + // keyspace name + if (!PATTERN_WORD_CHARS.matcher(name).matches()) +diff --git a/src/java/org/apache/cassandra/cql3/statements/CreateTableStatement.java b/src/java/org/apache/cassandra/cql3/statements/CreateTableStatement.java +index 08c3a4c..e156e26 100644 +--- a/src/java/org/apache/cassandra/cql3/statements/CreateTableStatement.java ++++ b/src/java/org/apache/cassandra/cql3/statements/CreateTableStatement.java +@@ -288,11 +288,11 @@ public class CreateTableStatement extends SchemaAlteringStatement + } + + boolean useCompactStorage = properties.useCompactStorage; +- // Dense means that on the thrift side, no part of the "thrift column name" stores a "CQL/metadata column name". +- // This means COMPACT STORAGE with at least one clustering type (otherwise it's a thrift "static" CF). ++ // Dense meant, back with thrift, that no part of the "thrift column name" stores a "CQL/metadata column name". ++ // This means COMPACT STORAGE with at least one clustering type (otherwise it's a "static" CF). + stmt.isDense = useCompactStorage && !stmt.clusteringTypes.isEmpty(); +- // Compound means that on the thrift side, the "thrift column name" is a composite one. It's the case unless +- // we use compact storage COMPACT STORAGE and we have either no clustering columns (thrift "static" CF) or ++ // Compound meant the "thrift column name" was a composite one. 
It's the case unless ++ // we use compact storage COMPACT STORAGE and we have either no clustering columns ("static" CF) or + // only one of them (if more than one, it's a "dense composite"). + stmt.isCompound = !(useCompactStorage && stmt.clusteringTypes.size() <= 1); + +diff --git a/src/java/org/apache/cassandra/cql3/statements/CreateTriggerStatement.java b/src/java/org/apache/cassandra/cql3/statements/CreateTriggerStatement.java +index 94cfc15..c43dd0e 100644 +--- a/src/java/org/apache/cassandra/cql3/statements/CreateTriggerStatement.java ++++ b/src/java/org/apache/cassandra/cql3/statements/CreateTriggerStatement.java +@@ -23,6 +23,7 @@ import org.slf4j.LoggerFactory; + import org.apache.cassandra.config.CFMetaData; + import org.apache.cassandra.config.Schema; + import org.apache.cassandra.cql3.CFName; ++import org.apache.cassandra.cql3.Validation; + import org.apache.cassandra.exceptions.ConfigurationException; + import org.apache.cassandra.exceptions.InvalidRequestException; + import org.apache.cassandra.exceptions.RequestValidationException; +@@ -31,7 +32,6 @@ import org.apache.cassandra.schema.TriggerMetadata; + import org.apache.cassandra.schema.Triggers; + import org.apache.cassandra.service.ClientState; + import org.apache.cassandra.service.MigrationManager; +-import org.apache.cassandra.thrift.ThriftValidation; + import org.apache.cassandra.transport.Event; + import org.apache.cassandra.triggers.TriggerExecutor; + +@@ -58,7 +58,7 @@ public class CreateTriggerStatement extends SchemaAlteringStatement + + public void validate(ClientState state) throws RequestValidationException + { +- CFMetaData cfm = ThriftValidation.validateColumnFamily(keyspace(), columnFamily()); ++ CFMetaData cfm = Validation.validateColumnFamily(keyspace(), columnFamily()); + if (cfm.isView()) + throw new InvalidRequestException("Cannot CREATE TRIGGER against a materialized view"); + +diff --git a/src/java/org/apache/cassandra/cql3/statements/CreateViewStatement.java 
b/src/java/org/apache/cassandra/cql3/statements/CreateViewStatement.java +index 013adbc..c2e6360 100644 +--- a/src/java/org/apache/cassandra/cql3/statements/CreateViewStatement.java ++++ b/src/java/org/apache/cassandra/cql3/statements/CreateViewStatement.java +@@ -43,7 +43,6 @@ import org.apache.cassandra.schema.TableParams; + import org.apache.cassandra.service.ClientState; + import org.apache.cassandra.service.ClientWarn; + import org.apache.cassandra.service.MigrationManager; +-import org.apache.cassandra.thrift.ThriftValidation; + import org.apache.cassandra.transport.Event; + + public class CreateViewStatement extends SchemaAlteringStatement +@@ -132,7 +131,7 @@ public class CreateViewStatement extends SchemaAlteringStatement + if (!baseName.getKeyspace().equals(keyspace())) + throw new InvalidRequestException("Cannot create a materialized view on a table in a separate keyspace"); + +- CFMetaData cfm = ThriftValidation.validateColumnFamily(baseName.getKeyspace(), baseName.getColumnFamily()); ++ CFMetaData cfm = Validation.validateColumnFamily(baseName.getKeyspace(), baseName.getColumnFamily()); + + if (cfm.isCounter()) + throw new InvalidRequestException("Materialized views are not supported on counter tables"); +diff --git a/src/java/org/apache/cassandra/cql3/statements/DropAggregateStatement.java b/src/java/org/apache/cassandra/cql3/statements/DropAggregateStatement.java +index 2b1432b..c770805 100644 +--- a/src/java/org/apache/cassandra/cql3/statements/DropAggregateStatement.java ++++ b/src/java/org/apache/cassandra/cql3/statements/DropAggregateStatement.java +@@ -24,6 +24,7 @@ import java.util.List; + import org.apache.cassandra.auth.Permission; + import org.apache.cassandra.config.Schema; + import org.apache.cassandra.cql3.CQL3Type; ++import org.apache.cassandra.cql3.Validation; + import org.apache.cassandra.cql3.functions.*; + import org.apache.cassandra.db.marshal.AbstractType; + import org.apache.cassandra.exceptions.InvalidRequestException; +@@ -31,7 
+32,6 @@ import org.apache.cassandra.exceptions.RequestValidationException; + import org.apache.cassandra.exceptions.UnauthorizedException; + import org.apache.cassandra.service.ClientState; + import org.apache.cassandra.service.MigrationManager; +-import org.apache.cassandra.thrift.ThriftValidation; + import org.apache.cassandra.transport.Event; + + /** +@@ -63,7 +63,7 @@ public final class DropAggregateStatement extends SchemaAlteringStatement + if (!functionName.hasKeyspace()) + throw new InvalidRequestException("Functions must be fully qualified with a keyspace name if a keyspace is not set for the session"); + +- ThriftValidation.validateKeyspaceNotSystem(functionName.keyspace); ++ Validation.validateKeyspaceNotSystem(functionName.keyspace); + } + + public void checkAccess(ClientState state) throws UnauthorizedException, InvalidRequestException +diff --git a/src/java/org/apache/cassandra/cql3/statements/DropFunctionStatement.java b/src/java/org/apache/cassandra/cql3/statements/DropFunctionStatement.java +index 6f11f9c..6ab4189 100644 +--- a/src/java/org/apache/cassandra/cql3/statements/DropFunctionStatement.java ++++ b/src/java/org/apache/cassandra/cql3/statements/DropFunctionStatement.java +@@ -27,6 +27,7 @@ import org.apache.cassandra.auth.FunctionResource; + import org.apache.cassandra.auth.Permission; + import org.apache.cassandra.config.Schema; + import org.apache.cassandra.cql3.CQL3Type; ++import org.apache.cassandra.cql3.Validation; + import org.apache.cassandra.cql3.functions.*; + import org.apache.cassandra.db.marshal.AbstractType; + import org.apache.cassandra.exceptions.InvalidRequestException; +@@ -35,7 +36,6 @@ import org.apache.cassandra.exceptions.UnauthorizedException; + import org.apache.cassandra.schema.KeyspaceMetadata; + import org.apache.cassandra.service.ClientState; + import org.apache.cassandra.service.MigrationManager; +-import org.apache.cassandra.thrift.ThriftValidation; + import org.apache.cassandra.transport.Event; + + /** +@@ 
-93,7 +93,7 @@ public final class DropFunctionStatement extends SchemaAlteringStatement + if (!functionName.hasKeyspace()) + throw new InvalidRequestException("Functions must be fully qualified with a keyspace name if a keyspace is not set for the session"); + +- ThriftValidation.validateKeyspaceNotSystem(functionName.keyspace); ++ Validation.validateKeyspaceNotSystem(functionName.keyspace); + } + + public void checkAccess(ClientState state) throws UnauthorizedException, InvalidRequestException +diff --git a/src/java/org/apache/cassandra/cql3/statements/DropKeyspaceStatement.java b/src/java/org/apache/cassandra/cql3/statements/DropKeyspaceStatement.java +index a08b193..5119462 100644 +--- a/src/java/org/apache/cassandra/cql3/statements/DropKeyspaceStatement.java ++++ b/src/java/org/apache/cassandra/cql3/statements/DropKeyspaceStatement.java +@@ -18,13 +18,13 @@ + package org.apache.cassandra.cql3.statements; + + import org.apache.cassandra.auth.Permission; ++import org.apache.cassandra.cql3.Validation; + import org.apache.cassandra.exceptions.ConfigurationException; + import org.apache.cassandra.exceptions.InvalidRequestException; + import org.apache.cassandra.exceptions.RequestValidationException; + import org.apache.cassandra.exceptions.UnauthorizedException; + import org.apache.cassandra.service.ClientState; + import org.apache.cassandra.service.MigrationManager; +-import org.apache.cassandra.thrift.ThriftValidation; + import org.apache.cassandra.transport.Event; + + public class DropKeyspaceStatement extends SchemaAlteringStatement +@@ -46,7 +46,7 @@ public class DropKeyspaceStatement extends SchemaAlteringStatement + + public void validate(ClientState state) throws RequestValidationException + { +- ThriftValidation.validateKeyspaceNotSystem(keyspace); ++ Validation.validateKeyspaceNotSystem(keyspace); + } + + @Override +diff --git a/src/java/org/apache/cassandra/cql3/statements/DropTriggerStatement.java 
b/src/java/org/apache/cassandra/cql3/statements/DropTriggerStatement.java +index 3f61e01..26a1c00 100644 +--- a/src/java/org/apache/cassandra/cql3/statements/DropTriggerStatement.java ++++ b/src/java/org/apache/cassandra/cql3/statements/DropTriggerStatement.java +@@ -23,6 +23,7 @@ import org.slf4j.LoggerFactory; + import org.apache.cassandra.config.CFMetaData; + import org.apache.cassandra.config.Schema; + import org.apache.cassandra.cql3.CFName; ++import org.apache.cassandra.cql3.Validation; + import org.apache.cassandra.exceptions.ConfigurationException; + import org.apache.cassandra.exceptions.InvalidRequestException; + import org.apache.cassandra.exceptions.RequestValidationException; +@@ -30,7 +31,6 @@ import org.apache.cassandra.exceptions.UnauthorizedException; + import org.apache.cassandra.schema.Triggers; + import org.apache.cassandra.service.ClientState; + import org.apache.cassandra.service.MigrationManager; +-import org.apache.cassandra.thrift.ThriftValidation; + import org.apache.cassandra.transport.Event; + + public class DropTriggerStatement extends SchemaAlteringStatement +@@ -55,7 +55,7 @@ public class DropTriggerStatement extends SchemaAlteringStatement + + public void validate(ClientState state) throws RequestValidationException + { +- ThriftValidation.validateColumnFamily(keyspace(), columnFamily()); ++ Validation.validateColumnFamily(keyspace(), columnFamily()); + } + + public Event.SchemaChange announceMigration(boolean isLocalOnly) throws ConfigurationException, InvalidRequestException +diff --git a/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java b/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java +index 8d85498..02725c9 100644 +--- a/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java ++++ b/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java +@@ -44,7 +44,6 @@ import org.apache.cassandra.service.ClientState; + import 
org.apache.cassandra.service.QueryState; + import org.apache.cassandra.service.StorageProxy; + import org.apache.cassandra.service.paxos.Commit; +-import org.apache.cassandra.thrift.ThriftValidation; + import org.apache.cassandra.transport.messages.ResultMessage; + import org.apache.cassandra.triggers.TriggerExecutor; + import org.apache.cassandra.utils.FBUtilities; +@@ -644,7 +643,7 @@ public abstract class ModificationStatement implements CQLStatement + now); + for (ByteBuffer key : keys) + { +- ThriftValidation.validateKey(cfm, key); ++ Validation.validateKey(cfm, key); + DecoratedKey dk = cfm.decorateKey(key); + + PartitionUpdate upd = collector.getPartitionUpdate(cfm, dk, options.getConsistency()); +@@ -661,7 +660,7 @@ public abstract class ModificationStatement implements CQLStatement + + for (ByteBuffer key : keys) + { +- ThriftValidation.validateKey(cfm, key); ++ Validation.validateKey(cfm, key); + DecoratedKey dk = cfm.decorateKey(key); + + PartitionUpdate upd = collector.getPartitionUpdate(cfm, dk, options.getConsistency()); +@@ -779,13 +778,13 @@ public abstract class ModificationStatement implements CQLStatement + { + VariableSpecifications boundNames = getBoundVariables(); + ModificationStatement statement = prepare(boundNames); +- CFMetaData cfm = ThriftValidation.validateColumnFamily(keyspace(), columnFamily()); ++ CFMetaData cfm = Validation.validateColumnFamily(keyspace(), columnFamily()); + return new ParsedStatement.Prepared(statement, boundNames, boundNames.getPartitionKeyBindIndexes(cfm)); + } + + public ModificationStatement prepare(VariableSpecifications boundNames) + { +- CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace(), columnFamily()); ++ CFMetaData metadata = Validation.validateColumnFamily(keyspace(), columnFamily()); + + Attributes preparedAttributes = attrs.prepare(keyspace(), columnFamily()); + preparedAttributes.collectMarkerSpecification(boundNames); +diff --git 
a/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java b/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java +index a8b97d1..ddd0f11 100644 +--- a/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java ++++ b/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java +@@ -53,7 +53,6 @@ import org.apache.cassandra.service.ClientWarn; + import org.apache.cassandra.service.QueryState; + import org.apache.cassandra.service.pager.PagingState; + import org.apache.cassandra.service.pager.QueryPager; +-import org.apache.cassandra.thrift.ThriftValidation; + import org.apache.cassandra.transport.messages.ResultMessage; + import org.apache.cassandra.utils.ByteBufferUtil; + import org.apache.cassandra.utils.FBUtilities; +@@ -890,7 +889,7 @@ public class SelectStatement implements CQLStatement + + public ParsedStatement.Prepared prepare(boolean forView) throws InvalidRequestException + { +- CFMetaData cfm = ThriftValidation.validateColumnFamily(keyspace(), columnFamily()); ++ CFMetaData cfm = Validation.validateColumnFamily(keyspace(), columnFamily()); + VariableSpecifications boundNames = getBoundVariables(); + + Selection selection = selectClause.isEmpty() +diff --git a/src/java/org/apache/cassandra/cql3/statements/TruncateStatement.java b/src/java/org/apache/cassandra/cql3/statements/TruncateStatement.java +index 336091d..ddc0c75 100644 +--- a/src/java/org/apache/cassandra/cql3/statements/TruncateStatement.java ++++ b/src/java/org/apache/cassandra/cql3/statements/TruncateStatement.java +@@ -31,7 +31,6 @@ import org.apache.cassandra.transport.messages.ResultMessage; + import org.apache.cassandra.service.ClientState; + import org.apache.cassandra.service.QueryState; + import org.apache.cassandra.service.StorageProxy; +-import org.apache.cassandra.thrift.ThriftValidation; + + public class TruncateStatement extends CFStatement implements CQLStatement + { +@@ -57,7 +56,7 @@ public class TruncateStatement extends CFStatement 
implements CQLStatement + + public void validate(ClientState state) throws InvalidRequestException + { +- ThriftValidation.validateColumnFamily(keyspace(), columnFamily()); ++ Validation.validateColumnFamily(keyspace(), columnFamily()); + } + + public ResultMessage execute(QueryState state, QueryOptions options) throws InvalidRequestException, TruncateException +diff --git a/src/java/org/apache/cassandra/cql3/statements/UpdateStatement.java b/src/java/org/apache/cassandra/cql3/statements/UpdateStatement.java +index 3657f94..a028abd 100644 +--- a/src/java/org/apache/cassandra/cql3/statements/UpdateStatement.java ++++ b/src/java/org/apache/cassandra/cql3/statements/UpdateStatement.java +@@ -75,12 +75,9 @@ public class UpdateStatement extends ModificationStatement + + List updates = getRegularOperations(); + +- // For compact table, when we translate it to thrift, we don't have a row marker. So we don't accept an insert/update +- // that only sets the PK unless the is no declared non-PK columns (in the latter we just set the value empty). +- +- // For a dense layout, when we translate it to thrift, we don't have a row marker. So we don't accept an insert/update +- // that only sets the PK unless the is no declared non-PK columns (which we recognize because in that case the compact +- // value is of type "EmptyType"). ++ // For compact table, we don't accept an insert/update that only sets the PK unless the is no ++ // declared non-PK columns (which we recognize because in that case ++ // the compact value is of type "EmptyType"). 
+ if (cfm.isCompactTable() && updates.isEmpty()) + { + checkTrue(CompactTables.hasEmptyCompactValue(cfm), +diff --git a/src/java/org/apache/cassandra/db/BufferClustering.java b/src/java/org/apache/cassandra/db/BufferClustering.java +index df6a473..0c2ecbc 100644 +--- a/src/java/org/apache/cassandra/db/BufferClustering.java ++++ b/src/java/org/apache/cassandra/db/BufferClustering.java +@@ -27,9 +27,8 @@ import java.nio.ByteBuffer; + * prefix used by rows. + *

+ * Note however that while it's size must be equal to the table clustering size, a clustering can have +- * {@code null} values, and this mostly for thrift backward compatibility (in practice, if a value is null, +- * all of the following ones will be too because that's what thrift allows, but it's never assumed by the +- * code so we could start generally allowing nulls for clustering columns if we wanted to). ++ * {@code null} values (this is currently only allowed in COMPACT table for historical reasons, but we ++ * could imagine lifting that limitation if we decide it make sense from a CQL point of view). + */ + public class BufferClustering extends AbstractBufferClusteringPrefix implements Clustering + { +diff --git a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java +index aa79e90..deb229d 100644 +--- a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java ++++ b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java +@@ -1714,7 +1714,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean + keyIter.hasNext(); ) + { + CounterCacheKey key = keyIter.next(); +- DecoratedKey dk = decorateKey(ByteBuffer.wrap(key.partitionKey)); ++ DecoratedKey dk = decorateKey(key.partitionKey()); + if (key.ksAndCFName.equals(metadata.ksAndCFName) && !Range.isInRanges(dk.getToken(), ranges)) + CacheService.instance.counterCache.remove(key); + } +@@ -1967,7 +1967,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean + keyIter.hasNext(); ) + { + CounterCacheKey key = keyIter.next(); +- DecoratedKey dk = decorateKey(ByteBuffer.wrap(key.partitionKey)); ++ DecoratedKey dk = decorateKey(key.partitionKey()); + if (key.ksAndCFName.equals(metadata.ksAndCFName) && Bounds.isInBounds(dk.getToken(), boundsToInvalidate)) + { + CacheService.instance.counterCache.remove(key); +diff --git a/src/java/org/apache/cassandra/db/CompactTables.java b/src/java/org/apache/cassandra/db/CompactTables.java +index 
0d9c5df..31e482c 100644 +--- a/src/java/org/apache/cassandra/db/CompactTables.java ++++ b/src/java/org/apache/cassandra/db/CompactTables.java +@@ -74,9 +74,8 @@ import org.apache.cassandra.utils.ByteBufferUtil; + public abstract class CompactTables + { + // We use an empty value for the 1) this can't conflict with a user-defined column and 2) this actually +- // validate with any comparator which makes it convenient for columnDefinitionComparator(). ++ // validate with any comparator. + public static final ByteBuffer SUPER_COLUMN_MAP_COLUMN = ByteBufferUtil.EMPTY_BYTE_BUFFER; +- public static final String SUPER_COLUMN_MAP_COLUMN_STR = UTF8Type.instance.compose(SUPER_COLUMN_MAP_COLUMN); + + private CompactTables() {} + +@@ -93,14 +92,6 @@ public abstract class CompactTables + return columns.regulars.getSimple(0); + } + +- public static AbstractType columnDefinitionComparator(String kind, boolean isSuper, AbstractType rawComparator, AbstractType rawSubComparator) +- { +- if (!"regular".equals(kind)) +- return UTF8Type.instance; +- +- return isSuper ? 
rawSubComparator : rawComparator; +- } +- + public static boolean hasEmptyCompactValue(CFMetaData metadata) + { + return metadata.compactValueColumn().type instanceof EmptyType; +@@ -113,25 +104,15 @@ public abstract class CompactTables + + public static DefaultNames defaultNameGenerator(Set usedNames) + { +- return new DefaultNames(new HashSet(usedNames)); +- } +- +- public static DefaultNames defaultNameGenerator(Iterable defs) +- { +- Set usedNames = new HashSet<>(); +- for (ColumnDefinition def : defs) +- usedNames.add(def.name.toString()); +- return new DefaultNames(usedNames); ++ return new DefaultNames(new HashSet<>(usedNames)); + } + + public static class DefaultNames + { +- private static final String DEFAULT_PARTITION_KEY_NAME = "key"; + private static final String DEFAULT_CLUSTERING_NAME = "column"; + private static final String DEFAULT_COMPACT_VALUE_NAME = "value"; + + private final Set usedNames; +- private int partitionIndex = 0; + private int clusteringIndex = 1; + private int compactIndex = 0; + +@@ -140,19 +121,6 @@ public abstract class CompactTables + this.usedNames = usedNames; + } + +- public String defaultPartitionKeyName() +- { +- while (true) +- { +- // For compatibility sake, we call the first alias 'key' rather than 'key1'. This +- // is inconsistent with column alias, but it's probably not worth risking breaking compatibility now. +- String candidate = partitionIndex == 0 ? 
DEFAULT_PARTITION_KEY_NAME : DEFAULT_PARTITION_KEY_NAME + (partitionIndex + 1); +- ++partitionIndex; +- if (usedNames.add(candidate)) +- return candidate; +- } +- } +- + public String defaultClusteringName() + { + while (true) +diff --git a/src/java/org/apache/cassandra/db/EmptyIterators.java b/src/java/org/apache/cassandra/db/EmptyIterators.java +index 6bf8fff..24c923f 100644 +--- a/src/java/org/apache/cassandra/db/EmptyIterators.java ++++ b/src/java/org/apache/cassandra/db/EmptyIterators.java +@@ -53,17 +53,10 @@ public class EmptyIterators + private static class EmptyUnfilteredPartitionIterator extends EmptyBasePartitionIterator implements UnfilteredPartitionIterator + { + final CFMetaData metadata; +- final boolean isForThrift; + +- public EmptyUnfilteredPartitionIterator(CFMetaData metadata, boolean isForThrift) ++ public EmptyUnfilteredPartitionIterator(CFMetaData metadata) + { + this.metadata = metadata; +- this.isForThrift = isForThrift; +- } +- +- public boolean isForThrift() +- { +- return isForThrift; + } + + public CFMetaData metadata() +@@ -177,9 +170,9 @@ public class EmptyIterators + } + } + +- public static UnfilteredPartitionIterator unfilteredPartition(CFMetaData metadata, boolean isForThrift) ++ public static UnfilteredPartitionIterator unfilteredPartition(CFMetaData metadata) + { +- return new EmptyUnfilteredPartitionIterator(metadata, isForThrift); ++ return new EmptyUnfilteredPartitionIterator(metadata); + } + + public static PartitionIterator partition() +diff --git a/src/java/org/apache/cassandra/db/LegacyLayout.java b/src/java/org/apache/cassandra/db/LegacyLayout.java +deleted file mode 100644 +index 9e7e9b6..0000000 +--- a/src/java/org/apache/cassandra/db/LegacyLayout.java ++++ /dev/null +@@ -1,2380 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. 
The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. +- */ +-package org.apache.cassandra.db; +- +-import java.io.DataInput; +-import java.io.IOException; +-import java.io.IOError; +-import java.nio.ByteBuffer; +-import java.security.MessageDigest; +-import java.util.*; +- +-import org.apache.cassandra.utils.AbstractIterator; +-import com.google.common.collect.Iterators; +-import com.google.common.collect.Lists; +-import com.google.common.collect.PeekingIterator; +- +-import org.apache.cassandra.config.CFMetaData; +-import org.apache.cassandra.config.ColumnDefinition; +-import org.apache.cassandra.db.filter.ColumnFilter; +-import org.apache.cassandra.db.filter.DataLimits; +-import org.apache.cassandra.db.rows.*; +-import org.apache.cassandra.db.partitions.*; +-import org.apache.cassandra.db.context.CounterContext; +-import org.apache.cassandra.db.marshal.*; +-import org.apache.cassandra.io.util.DataInputPlus; +-import org.apache.cassandra.io.util.DataOutputPlus; +-import org.apache.cassandra.net.MessagingService; +-import org.apache.cassandra.utils.*; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-import static org.apache.cassandra.utils.ByteBufferUtil.bytes; +- +-/** +- * Functions to deal with the old format. 
+- */ +-public abstract class LegacyLayout +-{ +- private static final Logger logger = LoggerFactory.getLogger(LegacyLayout.class); +- +- public final static int MAX_CELL_NAME_LENGTH = FBUtilities.MAX_UNSIGNED_SHORT; +- +- public final static int STATIC_PREFIX = 0xFFFF; +- +- public final static int DELETION_MASK = 0x01; +- public final static int EXPIRATION_MASK = 0x02; +- public final static int COUNTER_MASK = 0x04; +- public final static int COUNTER_UPDATE_MASK = 0x08; +- private final static int RANGE_TOMBSTONE_MASK = 0x10; +- +- private LegacyLayout() {} +- +- public static AbstractType makeLegacyComparator(CFMetaData metadata) +- { +- ClusteringComparator comparator = metadata.comparator; +- if (!metadata.isCompound()) +- { +- assert comparator.size() == 1; +- return comparator.subtype(0); +- } +- +- boolean hasCollections = metadata.hasCollectionColumns() || metadata.hasDroppedCollectionColumns(); +- List> types = new ArrayList<>(comparator.size() + (metadata.isDense() ? 0 : 1) + (hasCollections ? 
1 : 0)); +- +- types.addAll(comparator.subtypes()); +- +- if (!metadata.isDense()) +- { +- types.add(UTF8Type.instance); +- +- if (hasCollections) +- { +- Map defined = new HashMap<>(); +- +- for (CFMetaData.DroppedColumn def : metadata.getDroppedColumns().values()) +- if (def.type instanceof CollectionType && def.type.isMultiCell()) +- defined.put(bytes(def.name), (CollectionType) def.type); +- +- for (ColumnDefinition def : metadata.partitionColumns()) +- if (def.type instanceof CollectionType && def.type.isMultiCell()) +- defined.put(def.name.bytes, (CollectionType) def.type); +- +- types.add(ColumnToCollectionType.getInstance(defined)); +- } +- } +- return CompositeType.getInstance(types); +- } +- +- public static LegacyCellName decodeCellName(CFMetaData metadata, ByteBuffer superColumnName, ByteBuffer cellname) +- throws UnknownColumnException +- { +- assert cellname != null; +- if (metadata.isSuper()) +- { +- assert superColumnName != null; +- return decodeForSuperColumn(metadata, Clustering.make(superColumnName), cellname); +- } +- +- assert superColumnName == null; +- return decodeCellName(metadata, cellname); +- } +- +- private static LegacyCellName decodeForSuperColumn(CFMetaData metadata, Clustering clustering, ByteBuffer subcol) +- { +- ColumnDefinition def = metadata.getColumnDefinition(subcol); +- if (def != null) +- { +- // it's a statically defined subcolumn +- return new LegacyCellName(clustering, def, null); +- } +- +- def = metadata.compactValueColumn(); +- assert def != null && def.type instanceof MapType; +- return new LegacyCellName(clustering, def, subcol); +- } +- +- public static LegacyCellName decodeCellName(CFMetaData metadata, ByteBuffer cellname) throws UnknownColumnException +- { +- return decodeCellName(metadata, cellname, false); +- } +- +- public static LegacyCellName decodeCellName(CFMetaData metadata, ByteBuffer cellname, boolean readAllAsDynamic) throws UnknownColumnException +- { +- Clustering clustering = 
decodeClustering(metadata, cellname); +- +- if (metadata.isSuper()) +- return decodeForSuperColumn(metadata, clustering, CompositeType.extractComponent(cellname, 1)); +- +- if (metadata.isDense() || (metadata.isCompactTable() && readAllAsDynamic)) +- return new LegacyCellName(clustering, metadata.compactValueColumn(), null); +- +- ByteBuffer column = metadata.isCompound() ? CompositeType.extractComponent(cellname, metadata.comparator.size()) : cellname; +- if (column == null) +- { +- // Tables for composite 2ndary indexes used to be compound but dense, but we've transformed them into regular tables +- // (non compact ones) but with no regular column (i.e. we only care about the clustering). So we'll get here +- // in that case, and what we want to return is basically a row marker. +- if (metadata.partitionColumns().isEmpty()) +- return new LegacyCellName(clustering, null, null); +- +- // Otherwise, we shouldn't get there +- throw new IllegalArgumentException("No column name component found in cell name"); +- } +- +- // Row marker, this is ok +- if (!column.hasRemaining()) +- return new LegacyCellName(clustering, null, null); +- +- ColumnDefinition def = metadata.getColumnDefinition(column); +- if ((def == null) || def.isPrimaryKeyColumn()) +- { +- // If it's a compact table, it means the column is in fact a "dynamic" one +- if (metadata.isCompactTable()) +- return new LegacyCellName(Clustering.make(column), metadata.compactValueColumn(), null); +- +- if (def == null) +- throw new UnknownColumnException(metadata, column); +- else +- throw new IllegalArgumentException("Cannot add primary key column to partition update"); +- } +- +- ByteBuffer collectionElement = metadata.isCompound() ? 
CompositeType.extractComponent(cellname, metadata.comparator.size() + 1) : null; +- +- // Note that because static compact columns are translated to static defs in the new world order, we need to force a static +- // clustering if the definition is static (as it might not be in this case). +- return new LegacyCellName(def.isStatic() ? Clustering.STATIC_CLUSTERING : clustering, def, collectionElement); +- } +- +- public static LegacyBound decodeBound(CFMetaData metadata, ByteBuffer bound, boolean isStart) +- { +- if (!bound.hasRemaining()) +- return isStart ? LegacyBound.BOTTOM : LegacyBound.TOP; +- +- List components = metadata.isCompound() +- ? CompositeType.deconstruct(bound) +- : Collections.singletonList(new CompositeType.CompositeComponent(bound, (byte) 0)); +- +- // Either it's a prefix of the clustering, or it's the bound of a collection range tombstone (and thus has +- // the collection column name) +- assert components.size() <= metadata.comparator.size() || (!metadata.isCompactTable() && components.size() == metadata.comparator.size() + 1); +- +- List prefix = components.size() <= metadata.comparator.size() +- ? components +- : components.subList(0, metadata.comparator.size()); +- ClusteringPrefix.Kind boundKind; +- if (isStart) +- { +- if (components.get(components.size() - 1).eoc > 0) +- boundKind = ClusteringPrefix.Kind.EXCL_START_BOUND; +- else +- boundKind = ClusteringPrefix.Kind.INCL_START_BOUND; +- } +- else +- { +- if (components.get(components.size() - 1).eoc < 0) +- boundKind = ClusteringPrefix.Kind.EXCL_END_BOUND; +- else +- boundKind = ClusteringPrefix.Kind.INCL_END_BOUND; +- } +- +- ByteBuffer[] prefixValues = new ByteBuffer[prefix.size()]; +- for (int i = 0; i < prefix.size(); i++) +- prefixValues[i] = prefix.get(i).value; +- ClusteringBound sb = ClusteringBound.create(boundKind, prefixValues); +- +- ColumnDefinition collectionName = components.size() == metadata.comparator.size() + 1 +- ? 
metadata.getColumnDefinition(components.get(metadata.comparator.size()).value) +- : null; +- return new LegacyBound(sb, metadata.isCompound() && CompositeType.isStaticName(bound), collectionName); +- } +- +- public static ByteBuffer encodeBound(CFMetaData metadata, ClusteringBound bound, boolean isStart) +- { +- if (bound == ClusteringBound.BOTTOM || bound == ClusteringBound.TOP || metadata.comparator.size() == 0) +- return ByteBufferUtil.EMPTY_BYTE_BUFFER; +- +- ClusteringPrefix clustering = bound.clustering(); +- +- if (!metadata.isCompound()) +- { +- assert clustering.size() == 1; +- return clustering.get(0); +- } +- +- CompositeType ctype = CompositeType.getInstance(metadata.comparator.subtypes()); +- CompositeType.Builder builder = ctype.builder(); +- for (int i = 0; i < clustering.size(); i++) +- builder.add(clustering.get(i)); +- +- if (isStart) +- return bound.isInclusive() ? builder.build() : builder.buildAsEndOfRange(); +- else +- return bound.isInclusive() ? builder.buildAsEndOfRange() : builder.build(); +- } +- +- public static ByteBuffer encodeCellName(CFMetaData metadata, ClusteringPrefix clustering, ByteBuffer columnName, ByteBuffer collectionElement) +- { +- boolean isStatic = clustering == Clustering.STATIC_CLUSTERING; +- +- if (!metadata.isCompound()) +- { +- if (isStatic) +- return columnName; +- +- assert clustering.size() == 1 : "Expected clustering size to be 1, but was " + clustering.size(); +- return clustering.get(0); +- } +- +- // We use comparator.size() rather than clustering.size() because of static clusterings +- int clusteringSize = metadata.comparator.size(); +- int size = clusteringSize + (metadata.isDense() ? 0 : 1) + (collectionElement == null ? 
0 : 1); +- if (metadata.isSuper()) +- size = clusteringSize + 1; +- ByteBuffer[] values = new ByteBuffer[size]; +- for (int i = 0; i < clusteringSize; i++) +- { +- if (isStatic) +- { +- values[i] = ByteBufferUtil.EMPTY_BYTE_BUFFER; +- continue; +- } +- +- ByteBuffer v = clustering.get(i); +- // we can have null (only for dense compound tables for backward compatibility reasons) but that +- // means we're done and should stop there as far as building the composite is concerned. +- if (v == null) +- return CompositeType.build(Arrays.copyOfRange(values, 0, i)); +- +- values[i] = v; +- } +- +- if (metadata.isSuper()) +- { +- // We need to set the "column" (in thrift terms) name, i.e. the value corresponding to the subcomparator. +- // What it is depends if this a cell for a declared "static" column or a "dynamic" column part of the +- // super-column internal map. +- assert columnName != null; // This should never be null for supercolumns, see decodeForSuperColumn() above +- values[clusteringSize] = columnName.equals(CompactTables.SUPER_COLUMN_MAP_COLUMN) +- ? collectionElement +- : columnName; +- } +- else +- { +- if (!metadata.isDense()) +- values[clusteringSize] = columnName; +- if (collectionElement != null) +- values[clusteringSize + 1] = collectionElement; +- } +- +- return CompositeType.build(isStatic, values); +- } +- +- public static Clustering decodeClustering(CFMetaData metadata, ByteBuffer value) +- { +- int csize = metadata.comparator.size(); +- if (csize == 0) +- return Clustering.EMPTY; +- +- if (metadata.isCompound() && CompositeType.isStaticName(value)) +- return Clustering.STATIC_CLUSTERING; +- +- List components = metadata.isCompound() +- ? 
CompositeType.splitName(value) +- : Collections.singletonList(value); +- +- return Clustering.make(components.subList(0, Math.min(csize, components.size())).toArray(new ByteBuffer[csize])); +- } +- +- public static ByteBuffer encodeClustering(CFMetaData metadata, ClusteringPrefix clustering) +- { +- if (clustering.size() == 0) +- return ByteBufferUtil.EMPTY_BYTE_BUFFER; +- +- if (!metadata.isCompound()) +- { +- assert clustering.size() == 1; +- return clustering.get(0); +- } +- +- ByteBuffer[] values = new ByteBuffer[clustering.size()]; +- for (int i = 0; i < clustering.size(); i++) +- values[i] = clustering.get(i); +- return CompositeType.build(values); +- } +- +- /** +- * The maximum number of cells to include per partition when converting to the old format. +- *

+- * We already apply the limit during the actual query, but for queries that counts cells and not rows (thrift queries +- * and distinct queries as far as old nodes are concerned), we may still include a little bit more than requested +- * because {@link DataLimits} always include full rows. So if the limit ends in the middle of a queried row, the +- * full row will be part of our result. This would confuse old nodes however so we make sure to truncate it to +- * what's expected before writting it on the wire. +- * +- * @param command the read commmand for which to determine the maximum cells per partition. This can be {@code null} +- * in which case {@code Integer.MAX_VALUE} is returned. +- * @return the maximum number of cells per partition that should be enforced according to the read command if +- * post-query limitation are in order (see above). This will be {@code Integer.MAX_VALUE} if no such limits are +- * necessary. +- */ +- private static int maxCellsPerPartition(ReadCommand command) +- { +- if (command == null) +- return Integer.MAX_VALUE; +- +- DataLimits limits = command.limits(); +- +- // There is 2 types of DISTINCT queries: those that includes only the partition key, and those that include static columns. +- // On old nodes, the latter expects the first row in term of CQL count, which is what we already have and there is no additional +- // limit to apply. The former however expect only one cell per partition and rely on it (See CASSANDRA-10762). +- if (limits.isDistinct()) +- return command.columnFilter().fetchedColumns().statics.isEmpty() ? 
1 : Integer.MAX_VALUE; +- +- switch (limits.kind()) +- { +- case THRIFT_LIMIT: +- case SUPER_COLUMN_COUNTING_LIMIT: +- return limits.perPartitionCount(); +- default: +- return Integer.MAX_VALUE; +- } +- } +- +- // For serializing to old wire format +- public static LegacyUnfilteredPartition fromUnfilteredRowIterator(ReadCommand command, UnfilteredRowIterator iterator) +- { +- // we need to extract the range tombstone so materialize the partition. Since this is +- // used for the on-wire format, this is not worst than it used to be. +- final ImmutableBTreePartition partition = ImmutableBTreePartition.create(iterator); +- DeletionInfo info = partition.deletionInfo(); +- Pair> pair = fromRowIterator(partition.metadata(), partition.iterator(), partition.staticRow()); +- +- LegacyLayout.LegacyRangeTombstoneList rtl = pair.left; +- +- // Processing the cell iterator results in the LegacyRangeTombstoneList being populated, so we do this +- // before we use the LegacyRangeTombstoneList at all +- List cells = Lists.newArrayList(pair.right); +- +- int maxCellsPerPartition = maxCellsPerPartition(command); +- if (cells.size() > maxCellsPerPartition) +- cells = cells.subList(0, maxCellsPerPartition); +- +- // The LegacyRangeTombstoneList already has range tombstones for the single-row deletions and complex +- // deletions. Go through our normal range tombstones and add then to the LegacyRTL so that the range +- // tombstones all get merged and sorted properly. 
+- if (info.hasRanges()) +- { +- Iterator rangeTombstoneIterator = info.rangeIterator(false); +- while (rangeTombstoneIterator.hasNext()) +- { +- RangeTombstone rt = rangeTombstoneIterator.next(); +- Slice slice = rt.deletedSlice(); +- LegacyLayout.LegacyBound start = new LegacyLayout.LegacyBound(slice.start(), false, null); +- LegacyLayout.LegacyBound end = new LegacyLayout.LegacyBound(slice.end(), false, null); +- rtl.add(start, end, rt.deletionTime().markedForDeleteAt(), rt.deletionTime().localDeletionTime()); +- } +- } +- +- return new LegacyUnfilteredPartition(info.getPartitionDeletion(), rtl, cells); +- } +- +- public static void serializeAsLegacyPartition(ReadCommand command, UnfilteredRowIterator partition, DataOutputPlus out, int version) throws IOException +- { +- assert version < MessagingService.VERSION_30; +- +- out.writeBoolean(true); +- +- LegacyLayout.LegacyUnfilteredPartition legacyPartition = LegacyLayout.fromUnfilteredRowIterator(command, partition); +- +- UUIDSerializer.serializer.serialize(partition.metadata().cfId, out, version); +- DeletionTime.serializer.serialize(legacyPartition.partitionDeletion, out); +- +- legacyPartition.rangeTombstones.serialize(out, partition.metadata()); +- +- // begin cell serialization +- out.writeInt(legacyPartition.cells.size()); +- for (LegacyLayout.LegacyCell cell : legacyPartition.cells) +- { +- ByteBufferUtil.writeWithShortLength(cell.name.encode(partition.metadata()), out); +- out.writeByte(cell.serializationFlags()); +- if (cell.isExpiring()) +- { +- out.writeInt(cell.ttl); +- out.writeInt(cell.localDeletionTime); +- } +- else if (cell.isTombstone()) +- { +- out.writeLong(cell.timestamp); +- out.writeInt(TypeSizes.sizeof(cell.localDeletionTime)); +- out.writeInt(cell.localDeletionTime); +- continue; +- } +- else if (cell.isCounterUpdate()) +- { +- out.writeLong(cell.timestamp); +- long count = CounterContext.instance().getLocalCount(cell.value); +- ByteBufferUtil.writeWithLength(ByteBufferUtil.bytes(count), 
out); +- continue; +- } +- else if (cell.isCounter()) +- { +- out.writeLong(Long.MIN_VALUE); // timestampOfLastDelete (not used, and MIN_VALUE is the default) +- } +- +- out.writeLong(cell.timestamp); +- ByteBufferUtil.writeWithLength(cell.value, out); +- } +- } +- +- // For the old wire format +- // Note: this can return null if an empty partition is serialized! +- public static UnfilteredRowIterator deserializeLegacyPartition(DataInputPlus in, int version, SerializationHelper.Flag flag, ByteBuffer key) throws IOException +- { +- assert version < MessagingService.VERSION_30; +- +- // This is only used in mutation, and mutation have never allowed "null" column families +- boolean present = in.readBoolean(); +- if (!present) +- return null; +- +- CFMetaData metadata = CFMetaData.serializer.deserialize(in, version); +- LegacyDeletionInfo info = LegacyDeletionInfo.deserialize(metadata, in); +- int size = in.readInt(); +- Iterator cells = deserializeCells(metadata, in, flag, size); +- SerializationHelper helper = new SerializationHelper(metadata, version, flag); +- return onWireCellstoUnfilteredRowIterator(metadata, metadata.partitioner.decorateKey(key), info, cells, false, helper); +- } +- +- // For the old wire format +- public static long serializedSizeAsLegacyPartition(ReadCommand command, UnfilteredRowIterator partition, int version) +- { +- assert version < MessagingService.VERSION_30; +- +- if (partition.isEmpty()) +- return TypeSizes.sizeof(false); +- +- long size = TypeSizes.sizeof(true); +- +- LegacyLayout.LegacyUnfilteredPartition legacyPartition = LegacyLayout.fromUnfilteredRowIterator(command, partition); +- +- size += UUIDSerializer.serializer.serializedSize(partition.metadata().cfId, version); +- size += DeletionTime.serializer.serializedSize(legacyPartition.partitionDeletion); +- size += legacyPartition.rangeTombstones.serializedSize(partition.metadata()); +- +- // begin cell serialization +- size += TypeSizes.sizeof(legacyPartition.cells.size()); +- 
for (LegacyLayout.LegacyCell cell : legacyPartition.cells) +- { +- size += ByteBufferUtil.serializedSizeWithShortLength(cell.name.encode(partition.metadata())); +- size += 1; // serialization flags +- if (cell.kind == LegacyLayout.LegacyCell.Kind.EXPIRING) +- { +- size += TypeSizes.sizeof(cell.ttl); +- size += TypeSizes.sizeof(cell.localDeletionTime); +- } +- else if (cell.kind == LegacyLayout.LegacyCell.Kind.DELETED) +- { +- size += TypeSizes.sizeof(cell.timestamp); +- // localDeletionTime replaces cell.value as the body +- size += TypeSizes.sizeof(TypeSizes.sizeof(cell.localDeletionTime)); +- size += TypeSizes.sizeof(cell.localDeletionTime); +- continue; +- } +- else if (cell.kind == LegacyLayout.LegacyCell.Kind.COUNTER) +- { +- size += TypeSizes.sizeof(Long.MIN_VALUE); // timestampOfLastDelete +- } +- +- size += TypeSizes.sizeof(cell.timestamp); +- size += ByteBufferUtil.serializedSizeWithLength(cell.value); +- } +- +- return size; +- } +- +- // For thrift sake +- public static UnfilteredRowIterator toUnfilteredRowIterator(CFMetaData metadata, +- DecoratedKey key, +- LegacyDeletionInfo delInfo, +- Iterator cells) +- { +- SerializationHelper helper = new SerializationHelper(metadata, 0, SerializationHelper.Flag.LOCAL); +- return toUnfilteredRowIterator(metadata, key, delInfo, cells, false, helper); +- } +- +- // For deserializing old wire format +- public static UnfilteredRowIterator onWireCellstoUnfilteredRowIterator(CFMetaData metadata, +- DecoratedKey key, +- LegacyDeletionInfo delInfo, +- Iterator cells, +- boolean reversed, +- SerializationHelper helper) +- { +- +- // If the table is a static compact, the "column_metadata" are now internally encoded as +- // static. This has already been recognized by decodeCellName, but it means the cells +- // provided are not in the expected order (the "static" cells are not necessarily at the front). +- // So sort them to make sure toUnfilteredRowIterator works as expected. 
+- // Further, if the query is reversed, then the on-wire format still has cells in non-reversed +- // order, but we need to have them reverse in the final UnfilteredRowIterator. So reverse them. +- if (metadata.isStaticCompactTable() || reversed) +- { +- List l = new ArrayList<>(); +- Iterators.addAll(l, cells); +- Collections.sort(l, legacyCellComparator(metadata, reversed)); +- cells = l.iterator(); +- } +- +- return toUnfilteredRowIterator(metadata, key, delInfo, cells, reversed, helper); +- } +- +- private static UnfilteredRowIterator toUnfilteredRowIterator(CFMetaData metadata, +- DecoratedKey key, +- LegacyDeletionInfo delInfo, +- Iterator cells, +- boolean reversed, +- SerializationHelper helper) +- { +- // A reducer that basically does nothing, we know the 2 merged iterators can't have conflicting atoms (since we merge cells with range tombstones). +- MergeIterator.Reducer reducer = new MergeIterator.Reducer() +- { +- private LegacyAtom atom; +- +- public void reduce(int idx, LegacyAtom current) +- { +- // We're merging cell with range tombstones, so we should always only have a single atom to reduce. +- assert atom == null; +- atom = current; +- } +- +- protected LegacyAtom getReduced() +- { +- return atom; +- } +- +- protected void onKeyChange() +- { +- atom = null; +- } +- }; +- List> iterators = Arrays.asList(asLegacyAtomIterator(cells), asLegacyAtomIterator(delInfo.inRowRangeTombstones())); +- PeekingIterator atoms = Iterators.peekingIterator(MergeIterator.get(iterators, legacyAtomComparator(metadata), reducer)); +- +- // Check if we have some static +- Row staticRow = atoms.hasNext() && atoms.peek().isStatic() +- ? 
getNextRow(CellGrouper.staticGrouper(metadata, helper), atoms) +- : Rows.EMPTY_STATIC_ROW; +- +- Iterator rows = convertToRows(new CellGrouper(metadata, helper), atoms); +- Iterator ranges = delInfo.deletionInfo.rangeIterator(reversed); +- return new RowAndDeletionMergeIterator(metadata, +- key, +- delInfo.deletionInfo.getPartitionDeletion(), +- ColumnFilter.all(metadata), +- staticRow, +- reversed, +- EncodingStats.NO_STATS, +- rows, +- ranges, +- true); +- } +- +- public static Row extractStaticColumns(CFMetaData metadata, DataInputPlus in, Columns statics) throws IOException +- { +- assert !statics.isEmpty(); +- assert metadata.isCompactTable(); +- +- if (metadata.isSuper()) +- // TODO: there is in practice nothing to do here, but we need to handle the column_metadata for super columns somewhere else +- throw new UnsupportedOperationException(); +- +- Set columnsToFetch = new HashSet<>(statics.size()); +- for (ColumnDefinition column : statics) +- columnsToFetch.add(column.name.bytes); +- +- Row.Builder builder = BTreeRow.unsortedBuilder(FBUtilities.nowInSeconds()); +- builder.newRow(Clustering.STATIC_CLUSTERING); +- +- boolean foundOne = false; +- LegacyAtom atom; +- while ((atom = readLegacyAtom(metadata, in, false)) != null) +- { +- if (atom.isCell()) +- { +- LegacyCell cell = atom.asCell(); +- if (!columnsToFetch.contains(cell.name.encode(metadata))) +- continue; +- +- foundOne = true; +- builder.addCell(new BufferCell(cell.name.column, cell.timestamp, cell.ttl, cell.localDeletionTime, cell.value, null)); +- } +- else +- { +- LegacyRangeTombstone tombstone = atom.asRangeTombstone(); +- // TODO: we need to track tombstones and potentially ignore cells that are +- // shadowed (or even better, replace them by tombstones). +- throw new UnsupportedOperationException(); +- } +- } +- +- return foundOne ? 
builder.build() : Rows.EMPTY_STATIC_ROW; +- } +- +- private static Row getNextRow(CellGrouper grouper, PeekingIterator cells) +- { +- if (!cells.hasNext()) +- return null; +- +- grouper.reset(); +- while (cells.hasNext() && grouper.addAtom(cells.peek())) +- { +- // We've added the cell already in the grouper, so just skip it +- cells.next(); +- } +- return grouper.getRow(); +- } +- +- @SuppressWarnings("unchecked") +- private static Iterator asLegacyAtomIterator(Iterator iter) +- { +- return (Iterator)iter; +- } +- +- private static Iterator convertToRows(final CellGrouper grouper, final PeekingIterator atoms) +- { +- return new AbstractIterator() +- { +- protected Row computeNext() +- { +- if (!atoms.hasNext()) +- return endOfData(); +- +- return getNextRow(grouper, atoms); +- } +- }; +- } +- +- public static Pair> fromRowIterator(final RowIterator iterator) +- { +- return fromRowIterator(iterator.metadata(), iterator, iterator.staticRow()); +- } +- +- private static Pair> fromRowIterator(final CFMetaData metadata, final Iterator iterator, final Row staticRow) +- { +- LegacyRangeTombstoneList deletions = new LegacyRangeTombstoneList(new LegacyBoundComparator(metadata.comparator), 10); +- Iterator cells = new AbstractIterator() +- { +- private Iterator currentRow = initializeRow(); +- +- private Iterator initializeRow() +- { +- if (staticRow == null || staticRow.isEmpty()) +- return Collections.emptyIterator(); +- +- Pair> row = fromRow(metadata, staticRow); +- deletions.addAll(row.left); +- return row.right; +- } +- +- protected LegacyCell computeNext() +- { +- while (true) +- { +- if (currentRow.hasNext()) +- return currentRow.next(); +- +- if (!iterator.hasNext()) +- return endOfData(); +- +- Pair> row = fromRow(metadata, iterator.next()); +- deletions.addAll(row.left); +- currentRow = row.right; +- } +- } +- }; +- +- return Pair.create(deletions, cells); +- } +- +- private static Pair> fromRow(final CFMetaData metadata, final Row row) +- { +- // convert any 
complex deletions or row deletion into normal range tombstones so that we can build and send a proper RangeTombstoneList +- // to legacy nodes +- LegacyRangeTombstoneList deletions = new LegacyRangeTombstoneList(new LegacyBoundComparator(metadata.comparator), 10); +- +- if (!row.deletion().isLive()) +- { +- Clustering clustering = row.clustering(); +- ClusteringBound startBound = ClusteringBound.inclusiveStartOf(clustering); +- ClusteringBound endBound = ClusteringBound.inclusiveEndOf(clustering); +- +- LegacyBound start = new LegacyLayout.LegacyBound(startBound, false, null); +- LegacyBound end = new LegacyLayout.LegacyBound(endBound, false, null); +- +- deletions.add(start, end, row.deletion().time().markedForDeleteAt(), row.deletion().time().localDeletionTime()); +- } +- +- for (ColumnData cd : row) +- { +- ColumnDefinition col = cd.column(); +- if (col.isSimple()) +- continue; +- +- DeletionTime delTime = ((ComplexColumnData)cd).complexDeletion(); +- if (!delTime.isLive()) +- { +- Clustering clustering = row.clustering(); +- +- ClusteringBound startBound = ClusteringBound.inclusiveStartOf(clustering); +- ClusteringBound endBound = ClusteringBound.inclusiveEndOf(clustering); +- +- LegacyLayout.LegacyBound start = new LegacyLayout.LegacyBound(startBound, col.isStatic(), col); +- LegacyLayout.LegacyBound end = new LegacyLayout.LegacyBound(endBound, col.isStatic(), col); +- +- deletions.add(start, end, delTime.markedForDeleteAt(), delTime.localDeletionTime()); +- } +- } +- +- Iterator cells = new AbstractIterator() +- { +- private final Iterator cells = row.cellsInLegacyOrder(metadata, false).iterator(); +- // we don't have (and shouldn't have) row markers for compact tables. 
+- private boolean hasReturnedRowMarker = metadata.isCompactTable(); +- +- protected LegacyCell computeNext() +- { +- if (!hasReturnedRowMarker) +- { +- hasReturnedRowMarker = true; +- +- // don't include a row marker if there's no timestamp on the primary key; this is the 3.0+ equivalent +- // of a row marker +- if (!row.primaryKeyLivenessInfo().isEmpty()) +- { +- LegacyCellName cellName = new LegacyCellName(row.clustering(), null, null); +- LivenessInfo info = row.primaryKeyLivenessInfo(); +- return new LegacyCell(info.isExpiring() ? LegacyCell.Kind.EXPIRING : LegacyCell.Kind.REGULAR, cellName, ByteBufferUtil.EMPTY_BYTE_BUFFER, info.timestamp(), info.localExpirationTime(), info.ttl()); +- } +- } +- +- if (!cells.hasNext()) +- return endOfData(); +- +- return makeLegacyCell(row.clustering(), cells.next()); +- } +- }; +- return Pair.create(deletions, cells); +- } +- +- private static LegacyCell makeLegacyCell(Clustering clustering, Cell cell) +- { +- LegacyCell.Kind kind; +- if (cell.isCounterCell()) +- kind = LegacyCell.Kind.COUNTER; +- else if (cell.isTombstone()) +- kind = LegacyCell.Kind.DELETED; +- else if (cell.isExpiring()) +- kind = LegacyCell.Kind.EXPIRING; +- else +- kind = LegacyCell.Kind.REGULAR; +- +- CellPath path = cell.path(); +- assert path == null || path.size() == 1; +- LegacyCellName name = new LegacyCellName(clustering, cell.column(), path == null ? 
null : path.get(0)); +- return new LegacyCell(kind, name, cell.value(), cell.timestamp(), cell.localDeletionTime(), cell.ttl()); +- } +- +- public static RowIterator toRowIterator(final CFMetaData metadata, +- final DecoratedKey key, +- final Iterator cells, +- final int nowInSec) +- { +- SerializationHelper helper = new SerializationHelper(metadata, 0, SerializationHelper.Flag.LOCAL); +- return UnfilteredRowIterators.filter(toUnfilteredRowIterator(metadata, key, LegacyDeletionInfo.live(), cells, false, helper), nowInSec); +- } +- +- public static Comparator legacyCellComparator(CFMetaData metadata) +- { +- return legacyCellComparator(metadata, false); +- } +- +- public static Comparator legacyCellComparator(final CFMetaData metadata, final boolean reversed) +- { +- final Comparator cellNameComparator = legacyCellNameComparator(metadata, reversed); +- return new Comparator() +- { +- public int compare(LegacyCell cell1, LegacyCell cell2) +- { +- LegacyCellName c1 = cell1.name; +- LegacyCellName c2 = cell2.name; +- +- int c = cellNameComparator.compare(c1, c2); +- if (c != 0) +- return c; +- +- // The actual sorting when the cellname is equal doesn't matter, we just want to make +- // sure the cells are not considered equal. +- if (cell1.timestamp != cell2.timestamp) +- return cell1.timestamp < cell2.timestamp ? -1 : 1; +- +- if (cell1.localDeletionTime != cell2.localDeletionTime) +- return cell1.localDeletionTime < cell2.localDeletionTime ? -1 : 1; +- +- return cell1.value.compareTo(cell2.value); +- } +- }; +- } +- +- // Note that this doesn't exactly compare cells as they were pre-3.0 because within a row they sort columns like +- // in 3.0, that is, with simple columns before complex columns. In other words, this comparator makes sure cells +- // are in the proper order to convert them to actual 3.0 rows. 
+- public static Comparator legacyCellNameComparator(final CFMetaData metadata, final boolean reversed) +- { +- return new Comparator() +- { +- public int compare(LegacyCellName c1, LegacyCellName c2) +- { +- // Compare clustering first +- if (c1.clustering == Clustering.STATIC_CLUSTERING) +- { +- if (c2.clustering != Clustering.STATIC_CLUSTERING) +- return -1; +- } +- else if (c2.clustering == Clustering.STATIC_CLUSTERING) +- { +- return 1; +- } +- else +- { +- int c = metadata.comparator.compare(c1.clustering, c2.clustering); +- if (c != 0) +- return reversed ? -c : c; +- } +- +- // Note that when reversed, we only care about the clustering being reversed, so it's ok +- // not to take reversed into account below. +- +- // Then check the column name +- if (c1.column != c2.column) +- { +- // A null for the column means it's a row marker +- if (c1.column == null) +- return -1; +- if (c2.column == null) +- return 1; +- +- assert c1.column.isRegular() || c1.column.isStatic(); +- assert c2.column.isRegular() || c2.column.isStatic(); +- int cmp = c1.column.compareTo(c2.column); +- if (cmp != 0) +- return cmp; +- } +- +- assert (c1.collectionElement == null) == (c2.collectionElement == null); +- +- if (c1.collectionElement != null) +- { +- AbstractType colCmp = ((CollectionType)c1.column.type).nameComparator(); +- return colCmp.compare(c1.collectionElement, c2.collectionElement); +- } +- return 0; +- } +- }; +- } +- +- private static boolean equalValues(ClusteringPrefix c1, ClusteringPrefix c2, ClusteringComparator comparator) +- { +- assert c1.size() == c2.size(); +- for (int i = 0; i < c1.size(); i++) +- { +- if (comparator.compareComponent(i, c1.get(i), c2.get(i)) != 0) +- return false; +- } +- return true; +- } +- +- private static Comparator legacyAtomComparator(CFMetaData metadata) +- { +- return (o1, o2) -> +- { +- // First we want to compare by clustering, but we have to be careful with range tombstone, because +- // we can have collection deletion and we want 
those to sort properly just before the column they +- // delete, not before the whole row. +- // We also want to special case static so they sort before any non-static. Note in particular that +- // this special casing is important in the case of one of the Atom being Bound.BOTTOM: we want +- // it to sort after the static as we deal with static first in toUnfilteredAtomIterator and having +- // Bound.BOTTOM first would mess that up (note that static deletion is handled through a specific +- // static tombstone, see LegacyDeletionInfo.add()). +- if (o1.isStatic() != o2.isStatic()) +- return o1.isStatic() ? -1 : 1; +- +- ClusteringPrefix c1 = o1.clustering(); +- ClusteringPrefix c2 = o2.clustering(); +- +- int clusteringComparison; +- if (c1.size() != c2.size() || (o1.isCell() == o2.isCell()) || !equalValues(c1, c2, metadata.comparator)) +- { +- clusteringComparison = metadata.comparator.compare(c1, c2); +- } +- else +- { +- // one is a cell and one is a range tombstone, and both have the same prefix size (that is, the +- // range tombstone is either a row deletion or a collection deletion). +- LegacyRangeTombstone rt = o1.isCell() ? o2.asRangeTombstone() : o1.asRangeTombstone(); +- clusteringComparison = rt.isCollectionTombstone() +- ? 0 +- : metadata.comparator.compare(c1, c2); +- } +- +- // Note that if both are range tombstones and have the same clustering, then they are equal. +- if (clusteringComparison != 0) +- return clusteringComparison; +- +- if (o1.isCell()) +- { +- LegacyCell cell1 = o1.asCell(); +- if (o2.isCell()) +- { +- LegacyCell cell2 = o2.asCell(); +- // Check for row marker cells +- if (cell1.name.column == null) +- return cell2.name.column == null ? 0 : -1; +- return cell2.name.column == null ? 
1 : cell1.name.column.compareTo(cell2.name.column); +- } +- +- LegacyRangeTombstone rt2 = o2.asRangeTombstone(); +- assert rt2.isCollectionTombstone(); // otherwise, we shouldn't have got a clustering equality +- if (cell1.name.column == null) +- return -1; +- int cmp = cell1.name.column.compareTo(rt2.start.collectionName); +- // If both are for the same column, then the RT should come first +- return cmp == 0 ? 1 : cmp; +- } +- else +- { +- assert o2.isCell(); +- LegacyCell cell2 = o2.asCell(); +- +- LegacyRangeTombstone rt1 = o1.asRangeTombstone(); +- assert rt1.isCollectionTombstone(); // otherwise, we shouldn't have got a clustering equality +- +- if (cell2.name.column == null) +- return 1; +- +- int cmp = rt1.start.collectionName.compareTo(cell2.name.column); +- // If both are for the same column, then the RT should come first +- return cmp == 0 ? -1 : cmp; +- } +- }; +- } +- +- public static LegacyAtom readLegacyAtom(CFMetaData metadata, DataInputPlus in, boolean readAllAsDynamic) throws IOException +- { +- while (true) +- { +- ByteBuffer cellname = ByteBufferUtil.readWithShortLength(in); +- if (!cellname.hasRemaining()) +- return null; // END_OF_ROW +- +- try +- { +- int b = in.readUnsignedByte(); +- return (b & RANGE_TOMBSTONE_MASK) != 0 +- ? readLegacyRangeTombstoneBody(metadata, in, cellname) +- : readLegacyCellBody(metadata, in, cellname, b, SerializationHelper.Flag.LOCAL, readAllAsDynamic); +- } +- catch (UnknownColumnException e) +- { +- // We can get there if we read a cell for a dropped column, and ff that is the case, +- // then simply ignore the cell is fine. 
But also not that we ignore if it's the +- // system keyspace because for those table we actually remove columns without registering +- // them in the dropped columns +- assert metadata.ksName.equals(SystemKeyspace.NAME) || metadata.getDroppedColumnDefinition(e.columnName) != null : e.getMessage(); +- } +- } +- } +- +- public static LegacyCell readLegacyCell(CFMetaData metadata, DataInput in, SerializationHelper.Flag flag) throws IOException, UnknownColumnException +- { +- ByteBuffer cellname = ByteBufferUtil.readWithShortLength(in); +- int b = in.readUnsignedByte(); +- return readLegacyCellBody(metadata, in, cellname, b, flag, false); +- } +- +- public static LegacyCell readLegacyCellBody(CFMetaData metadata, DataInput in, ByteBuffer cellname, int mask, SerializationHelper.Flag flag, boolean readAllAsDynamic) +- throws IOException, UnknownColumnException +- { +- // Note that we want to call decodeCellName only after we've deserialized other parts, since it can throw +- // and we want to throw only after having deserialized the full cell. 
+- if ((mask & COUNTER_MASK) != 0) +- { +- in.readLong(); // timestampOfLastDelete: this has been unused for a long time so we ignore it +- long ts = in.readLong(); +- ByteBuffer value = ByteBufferUtil.readWithLength(in); +- if (flag == SerializationHelper.Flag.FROM_REMOTE || (flag == SerializationHelper.Flag.LOCAL && CounterContext.instance().shouldClearLocal(value))) +- value = CounterContext.instance().clearAllLocal(value); +- return new LegacyCell(LegacyCell.Kind.COUNTER, decodeCellName(metadata, cellname, readAllAsDynamic), value, ts, Cell.NO_DELETION_TIME, Cell.NO_TTL); +- } +- else if ((mask & EXPIRATION_MASK) != 0) +- { +- int ttl = in.readInt(); +- int expiration = in.readInt(); +- long ts = in.readLong(); +- ByteBuffer value = ByteBufferUtil.readWithLength(in); +- return new LegacyCell(LegacyCell.Kind.EXPIRING, decodeCellName(metadata, cellname, readAllAsDynamic), value, ts, expiration, ttl); +- } +- else +- { +- long ts = in.readLong(); +- ByteBuffer value = ByteBufferUtil.readWithLength(in); +- LegacyCellName name = decodeCellName(metadata, cellname, readAllAsDynamic); +- return (mask & COUNTER_UPDATE_MASK) != 0 +- ? new LegacyCell(LegacyCell.Kind.COUNTER, name, CounterContext.instance().createLocal(ByteBufferUtil.toLong(value)), ts, Cell.NO_DELETION_TIME, Cell.NO_TTL) +- : ((mask & DELETION_MASK) == 0 +- ? 
new LegacyCell(LegacyCell.Kind.REGULAR, name, value, ts, Cell.NO_DELETION_TIME, Cell.NO_TTL) +- : new LegacyCell(LegacyCell.Kind.DELETED, name, ByteBufferUtil.EMPTY_BYTE_BUFFER, ts, ByteBufferUtil.toInt(value), Cell.NO_TTL)); +- } +- } +- +- public static LegacyRangeTombstone readLegacyRangeTombstoneBody(CFMetaData metadata, DataInputPlus in, ByteBuffer boundname) throws IOException +- { +- LegacyBound min = decodeBound(metadata, boundname, true); +- LegacyBound max = decodeBound(metadata, ByteBufferUtil.readWithShortLength(in), false); +- DeletionTime dt = DeletionTime.serializer.deserialize(in); +- return new LegacyRangeTombstone(min, max, dt); +- } +- +- public static Iterator deserializeCells(final CFMetaData metadata, +- final DataInput in, +- final SerializationHelper.Flag flag, +- final int size) +- { +- return new AbstractIterator() +- { +- private int i = 0; +- +- protected LegacyCell computeNext() +- { +- if (i >= size) +- return endOfData(); +- +- ++i; +- try +- { +- return readLegacyCell(metadata, in, flag); +- } +- catch (UnknownColumnException e) +- { +- // We can get there if we read a cell for a dropped column, and if that is the case, +- // then simply ignore the cell is fine. 
But also not that we ignore if it's the +- // system keyspace because for those table we actually remove columns without registering +- // them in the dropped columns +- if (metadata.ksName.equals(SystemKeyspace.NAME) || metadata.getDroppedColumnDefinition(e.columnName) != null) +- return computeNext(); +- else +- throw new IOError(e); +- } +- catch (IOException e) +- { +- throw new IOError(e); +- } +- } +- }; +- } +- +- public static class CellGrouper +- { +- public final CFMetaData metadata; +- private final boolean isStatic; +- private final SerializationHelper helper; +- private final Row.Builder builder; +- private Clustering clustering; +- +- private LegacyRangeTombstone rowDeletion; +- private LegacyRangeTombstone collectionDeletion; +- +- public CellGrouper(CFMetaData metadata, SerializationHelper helper) +- { +- this(metadata, helper, false); +- } +- +- private CellGrouper(CFMetaData metadata, SerializationHelper helper, boolean isStatic) +- { +- this.metadata = metadata; +- this.isStatic = isStatic; +- this.helper = helper; +- // We cannot use a sorted builder because we don't have exactly the same ordering in 3.0 and pre-3.0. More precisely, within a row, we +- // store all simple columns before the complex ones in 3.0, which we use to sort everything sorted by the column name before. Note however +- // that the unsorted builder won't have to reconcile cells, so the exact value we pass for nowInSec doesn't matter. +- this.builder = BTreeRow.unsortedBuilder(FBUtilities.nowInSeconds()); +- } +- +- public static CellGrouper staticGrouper(CFMetaData metadata, SerializationHelper helper) +- { +- return new CellGrouper(metadata, helper, true); +- } +- +- public void reset() +- { +- this.clustering = null; +- this.rowDeletion = null; +- this.collectionDeletion = null; +- } +- +- public boolean addAtom(LegacyAtom atom) +- { +- return atom.isCell() +- ? 
addCell(atom.asCell()) +- : addRangeTombstone(atom.asRangeTombstone()); +- } +- +- public boolean addCell(LegacyCell cell) +- { +- if (clustering == null) +- { +- clustering = cell.name.clustering; +- assert !isStatic || clustering == Clustering.STATIC_CLUSTERING; +- builder.newRow(clustering); +- } +- else if (!clustering.equals(cell.name.clustering)) +- { +- return false; +- } +- +- // Ignore shadowed cells +- if (rowDeletion != null && rowDeletion.deletionTime.deletes(cell.timestamp)) +- return true; +- +- ColumnDefinition column = cell.name.column; +- if (column == null) +- { +- // It's the row marker +- assert !cell.value.hasRemaining(); +- builder.addPrimaryKeyLivenessInfo(LivenessInfo.withExpirationTime(cell.timestamp, cell.ttl, cell.localDeletionTime)); +- } +- else +- { +- if (collectionDeletion != null && collectionDeletion.start.collectionName.name.equals(column.name) && collectionDeletion.deletionTime.deletes(cell.timestamp)) +- return true; +- +- if (helper.includes(column)) +- { +- CellPath path = null; +- if (column.isComplex()) +- { +- // Recalling startOfComplexColumn for every cell is a big inefficient, but it's ok in practice +- // and it's simpler. And since 1) this only matter for super column selection in thrift in +- // practice and 2) is only used during upgrade, it's probably worth keeping things simple. +- helper.startOfComplexColumn(column); +- path = cell.name.collectionElement == null ? 
null : CellPath.create(cell.name.collectionElement); +- if (!helper.includes(path)) +- return true; +- } +- Cell c = new BufferCell(column, cell.timestamp, cell.ttl, cell.localDeletionTime, cell.value, path); +- if (!helper.isDropped(c, column.isComplex())) +- builder.addCell(c); +- if (column.isComplex()) +- { +- helper.endOfComplexColumn(); +- } +- } +- } +- return true; +- } +- +- public boolean addRangeTombstone(LegacyRangeTombstone tombstone) +- { +- if (tombstone.isRowDeletion(metadata)) +- { +- if (clustering != null) +- { +- // If we're already in the row, there might be a chance that there were two range tombstones +- // written, as 2.x storage format does not guarantee just one range tombstone, unlike 3.x. +- // We have to make sure that clustering matches, which would mean that tombstone is for the +- // same row. +- if (rowDeletion != null && clustering.equals(tombstone.start.getAsClustering(metadata))) +- { +- // If the tombstone superceeds the previous delete, we discard the previous one +- if (tombstone.deletionTime.supersedes(rowDeletion.deletionTime)) +- { +- builder.addRowDeletion(Row.Deletion.regular(tombstone.deletionTime)); +- rowDeletion = tombstone; +- } +- return true; +- } +- +- // If we're already within a row and there was no delete written before that one, it can't be the same one +- return false; +- } +- +- clustering = tombstone.start.getAsClustering(metadata); +- builder.newRow(clustering); +- builder.addRowDeletion(Row.Deletion.regular(tombstone.deletionTime)); +- rowDeletion = tombstone; +- return true; +- } +- +- if (tombstone.isCollectionTombstone()) +- { +- if (clustering == null) +- { +- clustering = tombstone.start.getAsClustering(metadata); +- builder.newRow(clustering); +- } +- else if (!clustering.equals(tombstone.start.getAsClustering(metadata))) +- { +- return false; +- } +- +- builder.addComplexDeletion(tombstone.start.collectionName, tombstone.deletionTime); +- if (rowDeletion == null || 
tombstone.deletionTime.supersedes(rowDeletion.deletionTime)) +- collectionDeletion = tombstone; +- return true; +- } +- return false; +- } +- +- public Row getRow() +- { +- return builder.build(); +- } +- } +- +- public static class LegacyUnfilteredPartition +- { +- public final DeletionTime partitionDeletion; +- public final LegacyRangeTombstoneList rangeTombstones; +- public final List cells; +- +- private LegacyUnfilteredPartition(DeletionTime partitionDeletion, LegacyRangeTombstoneList rangeTombstones, List cells) +- { +- this.partitionDeletion = partitionDeletion; +- this.rangeTombstones = rangeTombstones; +- this.cells = cells; +- } +- +- public void digest(CFMetaData metadata, MessageDigest digest) +- { +- for (LegacyCell cell : cells) +- { +- digest.update(cell.name.encode(metadata).duplicate()); +- +- if (cell.isCounter()) +- CounterContext.instance().updateDigest(digest, cell.value); +- else +- digest.update(cell.value.duplicate()); +- +- FBUtilities.updateWithLong(digest, cell.timestamp); +- FBUtilities.updateWithByte(digest, cell.serializationFlags()); +- +- if (cell.isExpiring()) +- FBUtilities.updateWithInt(digest, cell.ttl); +- +- if (cell.isCounter()) +- { +- // Counters used to have the timestampOfLastDelete field, which we stopped using long ago and has been hard-coded +- // to Long.MIN_VALUE but was still taken into account in 2.2 counter digests (to maintain backward compatibility +- // in the first place). 
+- FBUtilities.updateWithLong(digest, Long.MIN_VALUE); +- } +- } +- +- if (partitionDeletion.markedForDeleteAt() != Long.MIN_VALUE) +- digest.update(ByteBufferUtil.bytes(partitionDeletion.markedForDeleteAt())); +- +- if (!rangeTombstones.isEmpty()) +- rangeTombstones.updateDigest(digest); +- } +- } +- +- public static class LegacyCellName +- { +- public final Clustering clustering; +- public final ColumnDefinition column; +- public final ByteBuffer collectionElement; +- +- private LegacyCellName(Clustering clustering, ColumnDefinition column, ByteBuffer collectionElement) +- { +- this.clustering = clustering; +- this.column = column; +- this.collectionElement = collectionElement; +- } +- +- public ByteBuffer encode(CFMetaData metadata) +- { +- return encodeCellName(metadata, clustering, column == null ? ByteBufferUtil.EMPTY_BYTE_BUFFER : column.name.bytes, collectionElement); +- } +- +- public ByteBuffer superColumnSubName() +- { +- assert collectionElement != null; +- return collectionElement; +- } +- +- public ByteBuffer superColumnName() +- { +- return clustering.get(0); +- } +- +- @Override +- public String toString() +- { +- StringBuilder sb = new StringBuilder(); +- for (int i = 0; i < clustering.size(); i++) +- sb.append(i > 0 ? ":" : "").append(clustering.get(i) == null ? "null" : ByteBufferUtil.bytesToHex(clustering.get(i))); +- return String.format("Cellname(clustering=%s, column=%s, collElt=%s)", sb.toString(), column == null ? "null" : column.name, collectionElement == null ? 
"null" : ByteBufferUtil.bytesToHex(collectionElement)); +- } +- } +- +- public static class LegacyBound +- { +- public static final LegacyBound BOTTOM = new LegacyBound(ClusteringBound.BOTTOM, false, null); +- public static final LegacyBound TOP = new LegacyBound(ClusteringBound.TOP, false, null); +- +- public final ClusteringBound bound; +- public final boolean isStatic; +- public final ColumnDefinition collectionName; +- +- public LegacyBound(ClusteringBound bound, boolean isStatic, ColumnDefinition collectionName) +- { +- this.bound = bound; +- this.isStatic = isStatic; +- this.collectionName = collectionName; +- } +- +- public Clustering getAsClustering(CFMetaData metadata) +- { +- if (isStatic) +- return Clustering.STATIC_CLUSTERING; +- +- assert bound.size() == metadata.comparator.size(); +- ByteBuffer[] values = new ByteBuffer[bound.size()]; +- for (int i = 0; i < bound.size(); i++) +- values[i] = bound.get(i); +- return Clustering.make(values); +- } +- +- @Override +- public String toString() +- { +- StringBuilder sb = new StringBuilder(); +- sb.append(bound.kind()).append('('); +- for (int i = 0; i < bound.size(); i++) +- sb.append(i > 0 ? ":" : "").append(bound.get(i) == null ? "null" : ByteBufferUtil.bytesToHex(bound.get(i))); +- sb.append(')'); +- return String.format("Bound(%s, collection=%s)", sb.toString(), collectionName == null ? "null" : collectionName.name); +- } +- } +- +- public interface LegacyAtom +- { +- public boolean isCell(); +- +- public ClusteringPrefix clustering(); +- public boolean isStatic(); +- +- public LegacyCell asCell(); +- public LegacyRangeTombstone asRangeTombstone(); +- } +- +- /** +- * A legacy cell. +- *

+- * This is used as a temporary object to facilitate dealing with the legacy format, this +- * is not meant to be optimal. +- */ +- public static class LegacyCell implements LegacyAtom +- { +- private final static int DELETION_MASK = 0x01; +- private final static int EXPIRATION_MASK = 0x02; +- private final static int COUNTER_MASK = 0x04; +- private final static int COUNTER_UPDATE_MASK = 0x08; +- private final static int RANGE_TOMBSTONE_MASK = 0x10; +- +- public enum Kind { REGULAR, EXPIRING, DELETED, COUNTER } +- +- public final Kind kind; +- +- public final LegacyCellName name; +- public final ByteBuffer value; +- +- public final long timestamp; +- public final int localDeletionTime; +- public final int ttl; +- +- private LegacyCell(Kind kind, LegacyCellName name, ByteBuffer value, long timestamp, int localDeletionTime, int ttl) +- { +- this.kind = kind; +- this.name = name; +- this.value = value; +- this.timestamp = timestamp; +- this.localDeletionTime = localDeletionTime; +- this.ttl = ttl; +- } +- +- public static LegacyCell regular(CFMetaData metadata, ByteBuffer superColumnName, ByteBuffer name, ByteBuffer value, long timestamp) +- throws UnknownColumnException +- { +- return new LegacyCell(Kind.REGULAR, decodeCellName(metadata, superColumnName, name), value, timestamp, Cell.NO_DELETION_TIME, Cell.NO_TTL); +- } +- +- public static LegacyCell expiring(CFMetaData metadata, ByteBuffer superColumnName, ByteBuffer name, ByteBuffer value, long timestamp, int ttl, int nowInSec) +- throws UnknownColumnException +- { +- return new LegacyCell(Kind.EXPIRING, decodeCellName(metadata, superColumnName, name), value, timestamp, nowInSec + ttl, ttl); +- } +- +- public static LegacyCell tombstone(CFMetaData metadata, ByteBuffer superColumnName, ByteBuffer name, long timestamp, int nowInSec) +- throws UnknownColumnException +- { +- return new LegacyCell(Kind.DELETED, decodeCellName(metadata, superColumnName, name), ByteBufferUtil.EMPTY_BYTE_BUFFER, timestamp, nowInSec, 
LivenessInfo.NO_TTL); +- } +- +- public static LegacyCell counter(CFMetaData metadata, ByteBuffer superColumnName, ByteBuffer name, long value) +- throws UnknownColumnException +- { +- // See UpdateParameters.addCounter() for more details on this +- ByteBuffer counterValue = CounterContext.instance().createLocal(value); +- return counter(decodeCellName(metadata, superColumnName, name), counterValue); +- } +- +- public static LegacyCell counter(LegacyCellName name, ByteBuffer value) +- { +- return new LegacyCell(Kind.COUNTER, name, value, FBUtilities.timestampMicros(), Cell.NO_DELETION_TIME, Cell.NO_TTL); +- } +- +- public byte serializationFlags() +- { +- if (isExpiring()) +- return EXPIRATION_MASK; +- if (isTombstone()) +- return DELETION_MASK; +- if (isCounterUpdate()) +- return COUNTER_UPDATE_MASK; +- if (isCounter()) +- return COUNTER_MASK; +- return 0; +- } +- +- private boolean isCounterUpdate() +- { +- // See UpdateParameters.addCounter() for more details on this +- return isCounter() && CounterContext.instance().isLocal(value); +- } +- +- public ClusteringPrefix clustering() +- { +- return name.clustering; +- } +- +- public boolean isStatic() +- { +- return name.clustering == Clustering.STATIC_CLUSTERING; +- } +- +- public boolean isCell() +- { +- return true; +- } +- +- public LegacyCell asCell() +- { +- return this; +- } +- +- public LegacyRangeTombstone asRangeTombstone() +- { +- throw new UnsupportedOperationException(); +- } +- +- public boolean isCounter() +- { +- return kind == Kind.COUNTER; +- } +- +- public boolean isExpiring() +- { +- return kind == Kind.EXPIRING; +- } +- +- public boolean isTombstone() +- { +- return kind == Kind.DELETED; +- } +- +- public boolean isLive(int nowInSec) +- { +- if (isTombstone()) +- return false; +- +- return !isExpiring() || nowInSec < localDeletionTime; +- } +- +- @Override +- public String toString() +- { +- return String.format("LegacyCell(%s, name=%s, v=%s, ts=%s, ldt=%s, ttl=%s)", kind, name, 
ByteBufferUtil.bytesToHex(value), timestamp, localDeletionTime, ttl); +- } +- } +- +- /** +- * A legacy range tombstone. +- *

+- * This is used as a temporary object to facilitate dealing with the legacy format, this +- * is not meant to be optimal. +- */ +- public static class LegacyRangeTombstone implements LegacyAtom +- { +- public final LegacyBound start; +- public final LegacyBound stop; +- public final DeletionTime deletionTime; +- +- public LegacyRangeTombstone(LegacyBound start, LegacyBound stop, DeletionTime deletionTime) +- { +- // Because of the way RangeTombstoneList work, we can have a tombstone where only one of +- // the bound has a collectionName. That happens if we have a big tombstone A (spanning one +- // or multiple rows) and a collection tombstone B. In that case, RangeTombstoneList will +- // split this into 3 RTs: the first one from the beginning of A to the beginning of B, +- // then B, then a third one from the end of B to the end of A. To make this simpler, if +- // we detect that case we transform the 1st and 3rd tombstone so they don't end in the middle +- // of a row (which is still correct). +- if ((start.collectionName == null) != (stop.collectionName == null)) +- { +- if (start.collectionName == null) +- stop = new LegacyBound(stop.bound, stop.isStatic, null); +- else +- start = new LegacyBound(start.bound, start.isStatic, null); +- } +- else if (!Objects.equals(start.collectionName, stop.collectionName)) +- { +- // We're in the similar but slightly more complex case where on top of the big tombstone +- // A, we have 2 (or more) collection tombstones B and C within A. So we also end up with +- // a tombstone that goes between the end of B and the start of C. 
+- start = new LegacyBound(start.bound, start.isStatic, null); +- stop = new LegacyBound(stop.bound, stop.isStatic, null); +- } +- +- this.start = start; +- this.stop = stop; +- this.deletionTime = deletionTime; +- } +- +- public ClusteringPrefix clustering() +- { +- return start.bound; +- } +- +- public LegacyRangeTombstone withNewStart(LegacyBound newStart) +- { +- return new LegacyRangeTombstone(newStart, stop, deletionTime); +- } +- +- public LegacyRangeTombstone withNewEnd(LegacyBound newStop) +- { +- return new LegacyRangeTombstone(start, newStop, deletionTime); +- } +- +- public boolean isCell() +- { +- return false; +- } +- +- public boolean isStatic() +- { +- return start.isStatic || stop.isStatic; +- } +- +- public LegacyCell asCell() +- { +- throw new UnsupportedOperationException(); +- } +- +- public LegacyRangeTombstone asRangeTombstone() +- { +- return this; +- } +- +- public boolean isCollectionTombstone() +- { +- return start.collectionName != null; +- } +- +- public boolean isRowDeletion(CFMetaData metadata) +- { +- if (start.collectionName != null +- || stop.collectionName != null +- || start.bound.size() != metadata.comparator.size() +- || stop.bound.size() != metadata.comparator.size()) +- return false; +- +- for (int i = 0; i < start.bound.size(); i++) +- if (!Objects.equals(start.bound.get(i), stop.bound.get(i))) +- return false; +- return true; +- } +- +- @Override +- public String toString() +- { +- return String.format("RT(%s-%s, %s)", start, stop, deletionTime); +- } +- } +- +- public static class LegacyDeletionInfo +- { +- public final MutableDeletionInfo deletionInfo; +- public final List inRowTombstones = new ArrayList<>(); +- +- private LegacyDeletionInfo(MutableDeletionInfo deletionInfo) +- { +- this.deletionInfo = deletionInfo; +- } +- +- public static LegacyDeletionInfo live() +- { +- return new LegacyDeletionInfo(MutableDeletionInfo.live()); +- } +- +- public void add(DeletionTime topLevel) +- { +- deletionInfo.add(topLevel); +- } 
+- +- private static ClusteringBound staticBound(CFMetaData metadata, boolean isStart) +- { +- // In pre-3.0 nodes, static row started by a clustering with all empty values so we +- // preserve that here. Note that in practice, it doesn't really matter since the rest +- // of the code will ignore the bound for RT that have their static flag set. +- ByteBuffer[] values = new ByteBuffer[metadata.comparator.size()]; +- for (int i = 0; i < values.length; i++) +- values[i] = ByteBufferUtil.EMPTY_BYTE_BUFFER; +- return isStart +- ? ClusteringBound.inclusiveStartOf(values) +- : ClusteringBound.inclusiveEndOf(values); +- } +- +- public void add(CFMetaData metadata, LegacyRangeTombstone tombstone) +- { +- if (metadata.hasStaticColumns()) +- { +- /* +- * For table having static columns we have to deal with the following cases: +- * 1. the end of the tombstone is static (in which case either the start is static or is BOTTOM, which is the same +- * for our consideration). This mean that either the range only delete the static row, or that it's a collection +- * tombstone of a static collection. In both case, we just add the tombstone to the inRowTombstones. +- * 2. only the start is static. There is then 2 subcase: either the start is inclusive, and that mean we include the +- * static row and more (so we add an inRowTombstone for the static and deal with the rest normally). Or the start +- * is exclusive, and that means we explicitely exclude the static (in which case we can just add the tombstone +- * as if it started at BOTTOM). +- * 3. none of the bound are static but the start is BOTTOM. This means we intended to delete the static row so we +- * need to add it to the inRowTombstones (and otherwise handle the range normally). 
+- */ +- if (tombstone.stop.isStatic) +- { +- // If the start is BOTTOM, we replace it by the beginning of the starting row so as to not confuse the +- // RangeTombstone.isRowDeletion() method +- if (tombstone.start == LegacyBound.BOTTOM) +- tombstone = tombstone.withNewStart(new LegacyBound(staticBound(metadata, true), true, null)); +- inRowTombstones.add(tombstone); +- return; +- } +- +- if (tombstone.start.isStatic) +- { +- if (tombstone.start.bound.isInclusive()) +- inRowTombstones.add(tombstone.withNewEnd(new LegacyBound(staticBound(metadata, false), true, null))); +- +- tombstone = tombstone.withNewStart(LegacyBound.BOTTOM); +- } +- else if (tombstone.start == LegacyBound.BOTTOM) +- { +- inRowTombstones.add(new LegacyRangeTombstone(new LegacyBound(staticBound(metadata, true), true, null), +- new LegacyBound(staticBound(metadata, false), true, null), +- tombstone.deletionTime)); +- } +- } +- +- if (tombstone.isCollectionTombstone() || tombstone.isRowDeletion(metadata)) +- inRowTombstones.add(tombstone); +- else +- add(metadata, new RangeTombstone(Slice.make(tombstone.start.bound, tombstone.stop.bound), tombstone.deletionTime)); +- } +- +- public void add(CFMetaData metadata, RangeTombstone tombstone) +- { +- deletionInfo.add(tombstone, metadata.comparator); +- } +- +- public Iterator inRowRangeTombstones() +- { +- return inRowTombstones.iterator(); +- } +- +- public static LegacyDeletionInfo deserialize(CFMetaData metadata, DataInputPlus in) throws IOException +- { +- DeletionTime topLevel = DeletionTime.serializer.deserialize(in); +- +- int rangeCount = in.readInt(); +- if (rangeCount == 0) +- return new LegacyDeletionInfo(new MutableDeletionInfo(topLevel)); +- +- LegacyDeletionInfo delInfo = new LegacyDeletionInfo(new MutableDeletionInfo(topLevel)); +- for (int i = 0; i < rangeCount; i++) +- { +- LegacyBound start = decodeBound(metadata, ByteBufferUtil.readWithShortLength(in), true); +- LegacyBound end = decodeBound(metadata, 
ByteBufferUtil.readWithShortLength(in), false); +- int delTime = in.readInt(); +- long markedAt = in.readLong(); +- +- delInfo.add(metadata, new LegacyRangeTombstone(start, end, new DeletionTime(markedAt, delTime))); +- } +- return delInfo; +- } +- } +- +- /** +- * A helper class for LegacyRangeTombstoneList. This replaces the Comparator that RTL used before 3.0. +- */ +- private static class LegacyBoundComparator implements Comparator +- { +- ClusteringComparator clusteringComparator; +- +- public LegacyBoundComparator(ClusteringComparator clusteringComparator) +- { +- this.clusteringComparator = clusteringComparator; +- } +- +- public int compare(LegacyBound a, LegacyBound b) +- { +- // In the legacy sorting, BOTTOM comes before anything else +- if (a == LegacyBound.BOTTOM) +- return b == LegacyBound.BOTTOM ? 0 : -1; +- if (b == LegacyBound.BOTTOM) +- return 1; +- +- // Excluding BOTTOM, statics are always before anything else. +- if (a.isStatic != b.isStatic) +- return a.isStatic ? -1 : 1; +- +- int result = this.clusteringComparator.compare(a.bound, b.bound); +- if (result != 0) +- return result; +- +- // If both have equal "bound" but one is a collection tombstone and not the other, then the other comes before as it points to the beginning of the row. +- if (a.collectionName == null) +- return b.collectionName == null ? 0 : 1; +- if (b.collectionName == null) +- return -1; +- +- return UTF8Type.instance.compare(a.collectionName.name.bytes, b.collectionName.name.bytes); +- } +- } +- +- /** +- * Almost an entire copy of RangeTombstoneList from C* 2.1. The main difference is that LegacyBoundComparator +- * is used in place of {@code Comparator} (because Composite doesn't exist any more). +- * +- * This class is needed to allow us to convert single-row deletions and complex deletions into range tombstones +- * and properly merge them into the normal set of range tombstones. 
+- */ +- public static class LegacyRangeTombstoneList +- { +- private final LegacyBoundComparator comparator; +- +- // Note: we don't want to use a List for the markedAts and delTimes to avoid boxing. We could +- // use a List for starts and ends, but having arrays everywhere is almost simpler. +- private LegacyBound[] starts; +- private LegacyBound[] ends; +- private long[] markedAts; +- private int[] delTimes; +- +- private int size; +- +- private LegacyRangeTombstoneList(LegacyBoundComparator comparator, LegacyBound[] starts, LegacyBound[] ends, long[] markedAts, int[] delTimes, int size) +- { +- assert starts.length == ends.length && starts.length == markedAts.length && starts.length == delTimes.length; +- this.comparator = comparator; +- this.starts = starts; +- this.ends = ends; +- this.markedAts = markedAts; +- this.delTimes = delTimes; +- this.size = size; +- } +- +- public LegacyRangeTombstoneList(LegacyBoundComparator comparator, int capacity) +- { +- this(comparator, new LegacyBound[capacity], new LegacyBound[capacity], new long[capacity], new int[capacity], 0); +- } +- +- public boolean isEmpty() +- { +- return size == 0; +- } +- +- public int size() +- { +- return size; +- } +- +- /** +- * Adds a new range tombstone. +- * +- * This method will be faster if the new tombstone sort after all the currently existing ones (this is a common use case), +- * but it doesn't assume it. +- */ +- public void add(LegacyBound start, LegacyBound end, long markedAt, int delTime) +- { +- if (isEmpty()) +- { +- addInternal(0, start, end, markedAt, delTime); +- return; +- } +- +- int c = comparator.compare(ends[size-1], start); +- +- // Fast path if we add in sorted order +- if (c <= 0) +- { +- addInternal(size, start, end, markedAt, delTime); +- } +- else +- { +- // Note: insertFrom expect i to be the insertion point in term of interval ends +- int pos = Arrays.binarySearch(ends, 0, size, start, comparator); +- insertFrom((pos >= 0 ? 
pos : -pos-1), start, end, markedAt, delTime); +- } +- } +- +- /* +- * Inserts a new element starting at index i. This method assumes that: +- * ends[i-1] <= start <= ends[i] +- * +- * A RangeTombstoneList is a list of range [s_0, e_0]...[s_n, e_n] such that: +- * - s_i <= e_i +- * - e_i <= s_i+1 +- * - if s_i == e_i and e_i == s_i+1 then s_i+1 < e_i+1 +- * Basically, range are non overlapping except for their bound and in order. And while +- * we allow ranges with the same value for the start and end, we don't allow repeating +- * such range (so we can't have [0, 0][0, 0] even though it would respect the first 2 +- * conditions). +- * +- */ +- +- /** +- * Adds all the range tombstones of {@code tombstones} to this RangeTombstoneList. +- */ +- public void addAll(LegacyRangeTombstoneList tombstones) +- { +- if (tombstones.isEmpty()) +- return; +- +- if (isEmpty()) +- { +- copyArrays(tombstones, this); +- return; +- } +- +- /* +- * We basically have 2 techniques we can use here: either we repeatedly call add() on tombstones values, +- * or we do a merge of both (sorted) lists. If this lists is bigger enough than the one we add, then +- * calling add() will be faster, otherwise it's merging that will be faster. +- * +- * Let's note that during memtables updates, it might not be uncommon that a new update has only a few range +- * tombstones, while the CF we're adding it to (the one in the memtable) has many. In that case, using add() is +- * likely going to be faster. +- * +- * In other cases however, like when diffing responses from multiple nodes, the tombstone lists we "merge" will +- * be likely sized, so using add() might be a bit inefficient. +- * +- * Roughly speaking (this ignore the fact that updating an element is not exactly constant but that's not a big +- * deal), if n is the size of this list and m is tombstones size, merging is O(n+m) while using add() is O(m*log(n)). +- * +- * But let's not crank up a logarithm computation for that. 
Long story short, merging will be a bad choice only +- * if this list size is lot bigger that the other one, so let's keep it simple. +- */ +- if (size > 10 * tombstones.size) +- { +- for (int i = 0; i < tombstones.size; i++) +- add(tombstones.starts[i], tombstones.ends[i], tombstones.markedAts[i], tombstones.delTimes[i]); +- } +- else +- { +- int i = 0; +- int j = 0; +- while (i < size && j < tombstones.size) +- { +- if (comparator.compare(tombstones.starts[j], ends[i]) <= 0) +- { +- insertFrom(i, tombstones.starts[j], tombstones.ends[j], tombstones.markedAts[j], tombstones.delTimes[j]); +- j++; +- } +- else +- { +- i++; +- } +- } +- // Addds the remaining ones from tombstones if any (note that addInternal will increment size if relevant). +- for (; j < tombstones.size; j++) +- addInternal(size, tombstones.starts[j], tombstones.ends[j], tombstones.markedAts[j], tombstones.delTimes[j]); +- } +- } +- +- private static void copyArrays(LegacyRangeTombstoneList src, LegacyRangeTombstoneList dst) +- { +- dst.grow(src.size); +- System.arraycopy(src.starts, 0, dst.starts, 0, src.size); +- System.arraycopy(src.ends, 0, dst.ends, 0, src.size); +- System.arraycopy(src.markedAts, 0, dst.markedAts, 0, src.size); +- System.arraycopy(src.delTimes, 0, dst.delTimes, 0, src.size); +- dst.size = src.size; +- } +- +- private void insertFrom(int i, LegacyBound start, LegacyBound end, long markedAt, int delTime) +- { +- while (i < size) +- { +- assert i == 0 || comparator.compare(ends[i-1], start) <= 0; +- +- int c = comparator.compare(start, ends[i]); +- assert c <= 0; +- if (c == 0) +- { +- // If start == ends[i], then we can insert from the next one (basically the new element +- // really start at the next element), except for the case where starts[i] == ends[i]. +- // In this latter case, if we were to move to next element, we could end up with ...[x, x][x, x]... 
+- if (comparator.compare(starts[i], ends[i]) == 0) +- { +- // The current element cover a single value which is equal to the start of the inserted +- // element. If the inserted element overwrites the current one, just remove the current +- // (it's included in what we insert) and proceed with the insert. +- if (markedAt > markedAts[i]) +- { +- removeInternal(i); +- continue; +- } +- +- // Otherwise (the current singleton interval override the new one), we want to leave the +- // current element and move to the next, unless start == end since that means the new element +- // is in fact fully covered by the current one (so we're done) +- if (comparator.compare(start, end) == 0) +- return; +- } +- i++; +- continue; +- } +- +- // Do we overwrite the current element? +- if (markedAt > markedAts[i]) +- { +- // We do overwrite. +- +- // First deal with what might come before the newly added one. +- if (comparator.compare(starts[i], start) < 0) +- { +- addInternal(i, starts[i], start, markedAts[i], delTimes[i]); +- i++; +- // We don't need to do the following line, but in spirit that's what we want to do +- // setInternal(i, start, ends[i], markedAts, delTime]) +- } +- +- // now, start <= starts[i] +- +- // Does the new element stops before/at the current one, +- int endCmp = comparator.compare(end, starts[i]); +- if (endCmp <= 0) +- { +- // Here start <= starts[i] and end <= starts[i] +- // This means the current element is before the current one. However, one special +- // case is if end == starts[i] and starts[i] == ends[i]. In that case, +- // the new element entirely overwrite the current one and we can just overwrite +- if (endCmp == 0 && comparator.compare(starts[i], ends[i]) == 0) +- setInternal(i, start, end, markedAt, delTime); +- else +- addInternal(i, start, end, markedAt, delTime); +- return; +- } +- +- // Do we overwrite the current element fully? 
+- int cmp = comparator.compare(ends[i], end); +- if (cmp <= 0) +- { +- // We do overwrite fully: +- // update the current element until it's end and continue +- // on with the next element (with the new inserted start == current end). +- +- // If we're on the last element, we can optimize +- if (i == size-1) +- { +- setInternal(i, start, end, markedAt, delTime); +- return; +- } +- +- setInternal(i, start, ends[i], markedAt, delTime); +- if (cmp == 0) +- return; +- +- start = ends[i]; +- i++; +- } +- else +- { +- // We don't ovewrite fully. Insert the new interval, and then update the now next +- // one to reflect the not overwritten parts. We're then done. +- addInternal(i, start, end, markedAt, delTime); +- i++; +- setInternal(i, end, ends[i], markedAts[i], delTimes[i]); +- return; +- } +- } +- else +- { +- // we don't overwrite the current element +- +- // If the new interval starts before the current one, insert that new interval +- if (comparator.compare(start, starts[i]) < 0) +- { +- // If we stop before the start of the current element, just insert the new +- // interval and we're done; otherwise insert until the beginning of the +- // current element +- if (comparator.compare(end, starts[i]) <= 0) +- { +- addInternal(i, start, end, markedAt, delTime); +- return; +- } +- addInternal(i, start, starts[i], markedAt, delTime); +- i++; +- } +- +- // After that, we're overwritten on the current element but might have +- // some residual parts after ... +- +- // ... unless we don't extend beyond it. 
+- if (comparator.compare(end, ends[i]) <= 0) +- return; +- +- start = ends[i]; +- i++; +- } +- } +- +- // If we got there, then just insert the remainder at the end +- addInternal(i, start, end, markedAt, delTime); +- } +- private int capacity() +- { +- return starts.length; +- } +- +- private void addInternal(int i, LegacyBound start, LegacyBound end, long markedAt, int delTime) +- { +- assert i >= 0; +- +- if (size == capacity()) +- growToFree(i); +- else if (i < size) +- moveElements(i); +- +- setInternal(i, start, end, markedAt, delTime); +- size++; +- } +- +- private void removeInternal(int i) +- { +- assert i >= 0; +- +- System.arraycopy(starts, i+1, starts, i, size - i - 1); +- System.arraycopy(ends, i+1, ends, i, size - i - 1); +- System.arraycopy(markedAts, i+1, markedAts, i, size - i - 1); +- System.arraycopy(delTimes, i+1, delTimes, i, size - i - 1); +- +- --size; +- starts[size] = null; +- ends[size] = null; +- } +- +- /* +- * Grow the arrays, leaving index i "free" in the process. +- */ +- private void growToFree(int i) +- { +- int newLength = (capacity() * 3) / 2 + 1; +- grow(i, newLength); +- } +- +- /* +- * Grow the arrays to match newLength capacity. 
+- */ +- private void grow(int newLength) +- { +- if (capacity() < newLength) +- grow(-1, newLength); +- } +- +- private void grow(int i, int newLength) +- { +- starts = grow(starts, size, newLength, i); +- ends = grow(ends, size, newLength, i); +- markedAts = grow(markedAts, size, newLength, i); +- delTimes = grow(delTimes, size, newLength, i); +- } +- +- private static LegacyBound[] grow(LegacyBound[] a, int size, int newLength, int i) +- { +- if (i < 0 || i >= size) +- return Arrays.copyOf(a, newLength); +- +- LegacyBound[] newA = new LegacyBound[newLength]; +- System.arraycopy(a, 0, newA, 0, i); +- System.arraycopy(a, i, newA, i+1, size - i); +- return newA; +- } +- +- private static long[] grow(long[] a, int size, int newLength, int i) +- { +- if (i < 0 || i >= size) +- return Arrays.copyOf(a, newLength); +- +- long[] newA = new long[newLength]; +- System.arraycopy(a, 0, newA, 0, i); +- System.arraycopy(a, i, newA, i+1, size - i); +- return newA; +- } +- +- private static int[] grow(int[] a, int size, int newLength, int i) +- { +- if (i < 0 || i >= size) +- return Arrays.copyOf(a, newLength); +- +- int[] newA = new int[newLength]; +- System.arraycopy(a, 0, newA, 0, i); +- System.arraycopy(a, i, newA, i+1, size - i); +- return newA; +- } +- +- /* +- * Move elements so that index i is "free", assuming the arrays have at least one free slot at the end. 
+- */ +- private void moveElements(int i) +- { +- if (i >= size) +- return; +- +- System.arraycopy(starts, i, starts, i+1, size - i); +- System.arraycopy(ends, i, ends, i+1, size - i); +- System.arraycopy(markedAts, i, markedAts, i+1, size - i); +- System.arraycopy(delTimes, i, delTimes, i+1, size - i); +- // we set starts[i] to null to indicate the position is now empty, so that we update boundaryHeapSize +- // when we set it +- starts[i] = null; +- } +- +- private void setInternal(int i, LegacyBound start, LegacyBound end, long markedAt, int delTime) +- { +- starts[i] = start; +- ends[i] = end; +- markedAts[i] = markedAt; +- delTimes[i] = delTime; +- } +- +- public void updateDigest(MessageDigest digest) +- { +- ByteBuffer longBuffer = ByteBuffer.allocate(8); +- for (int i = 0; i < size; i++) +- { +- for (int j = 0; j < starts[i].bound.size(); j++) +- digest.update(starts[i].bound.get(j).duplicate()); +- if (starts[i].collectionName != null) +- digest.update(starts[i].collectionName.name.bytes.duplicate()); +- for (int j = 0; j < ends[i].bound.size(); j++) +- digest.update(ends[i].bound.get(j).duplicate()); +- if (ends[i].collectionName != null) +- digest.update(ends[i].collectionName.name.bytes.duplicate()); +- +- longBuffer.putLong(0, markedAts[i]); +- digest.update(longBuffer.array(), 0, 8); +- } +- } +- +- public void serialize(DataOutputPlus out, CFMetaData metadata) throws IOException +- { +- out.writeInt(size); +- if (size == 0) +- return; +- +- if (metadata.isCompound()) +- serializeCompound(out, metadata.isDense()); +- else +- serializeSimple(out); +- } +- +- private void serializeCompound(DataOutputPlus out, boolean isDense) throws IOException +- { +- List> types = new ArrayList<>(comparator.clusteringComparator.subtypes()); +- +- if (!isDense) +- types.add(UTF8Type.instance); +- +- CompositeType type = CompositeType.getInstance(types); +- +- for (int i = 0; i < size; i++) +- { +- LegacyBound start = starts[i]; +- LegacyBound end = ends[i]; +- +- 
CompositeType.Builder startBuilder = type.builder(); +- CompositeType.Builder endBuilder = type.builder(); +- for (int j = 0; j < start.bound.clustering().size(); j++) +- { +- startBuilder.add(start.bound.get(j)); +- endBuilder.add(end.bound.get(j)); +- } +- +- if (start.collectionName != null) +- startBuilder.add(start.collectionName.name.bytes); +- if (end.collectionName != null) +- endBuilder.add(end.collectionName.name.bytes); +- +- ByteBufferUtil.writeWithShortLength(startBuilder.build(), out); +- ByteBufferUtil.writeWithShortLength(endBuilder.buildAsEndOfRange(), out); +- +- out.writeInt(delTimes[i]); +- out.writeLong(markedAts[i]); +- } +- } +- +- private void serializeSimple(DataOutputPlus out) throws IOException +- { +- List> types = new ArrayList<>(comparator.clusteringComparator.subtypes()); +- assert types.size() == 1 : types; +- +- for (int i = 0; i < size; i++) +- { +- LegacyBound start = starts[i]; +- LegacyBound end = ends[i]; +- +- ClusteringPrefix startClustering = start.bound.clustering(); +- ClusteringPrefix endClustering = end.bound.clustering(); +- +- assert startClustering.size() == 1; +- assert endClustering.size() == 1; +- +- ByteBufferUtil.writeWithShortLength(startClustering.get(0), out); +- ByteBufferUtil.writeWithShortLength(endClustering.get(0), out); +- +- out.writeInt(delTimes[i]); +- out.writeLong(markedAts[i]); +- } +- } +- +- public long serializedSize(CFMetaData metadata) +- { +- long size = 0; +- size += TypeSizes.sizeof(this.size); +- +- if (this.size == 0) +- return size; +- +- if (metadata.isCompound()) +- return size + serializedSizeCompound(metadata.isDense()); +- else +- return size + serializedSizeSimple(); +- } +- +- private long serializedSizeCompound(boolean isDense) +- { +- long size = 0; +- List> types = new ArrayList<>(comparator.clusteringComparator.subtypes()); +- if (!isDense) +- types.add(UTF8Type.instance); +- CompositeType type = CompositeType.getInstance(types); +- +- for (int i = 0; i < this.size; i++) +- { 
+- LegacyBound start = starts[i]; +- LegacyBound end = ends[i]; +- +- CompositeType.Builder startBuilder = type.builder(); +- CompositeType.Builder endBuilder = type.builder(); +- for (int j = 0; j < start.bound.clustering().size(); j++) +- { +- startBuilder.add(start.bound.get(j)); +- endBuilder.add(end.bound.get(j)); +- } +- +- if (start.collectionName != null) +- startBuilder.add(start.collectionName.name.bytes); +- if (end.collectionName != null) +- endBuilder.add(end.collectionName.name.bytes); +- +- size += ByteBufferUtil.serializedSizeWithShortLength(startBuilder.build()); +- size += ByteBufferUtil.serializedSizeWithShortLength(endBuilder.buildAsEndOfRange()); +- +- size += TypeSizes.sizeof(delTimes[i]); +- size += TypeSizes.sizeof(markedAts[i]); +- } +- return size; +- } +- +- private long serializedSizeSimple() +- { +- long size = 0; +- List> types = new ArrayList<>(comparator.clusteringComparator.subtypes()); +- assert types.size() == 1 : types; +- +- for (int i = 0; i < this.size; i++) +- { +- LegacyBound start = starts[i]; +- LegacyBound end = ends[i]; +- +- ClusteringPrefix startClustering = start.bound.clustering(); +- ClusteringPrefix endClustering = end.bound.clustering(); +- +- assert startClustering.size() == 1; +- assert endClustering.size() == 1; +- +- size += ByteBufferUtil.serializedSizeWithShortLength(startClustering.get(0)); +- size += ByteBufferUtil.serializedSizeWithShortLength(endClustering.get(0)); +- +- size += TypeSizes.sizeof(delTimes[i]); +- size += TypeSizes.sizeof(markedAts[i]); +- } +- return size; +- } +- } +-} +diff --git a/src/java/org/apache/cassandra/db/Memtable.java b/src/java/org/apache/cassandra/db/Memtable.java +index e9cca4a..3fb3857 100644 +--- a/src/java/org/apache/cassandra/db/Memtable.java ++++ b/src/java/org/apache/cassandra/db/Memtable.java +@@ -318,7 +318,7 @@ public class Memtable implements Comparable + 100 * allocator.onHeap().ownershipRatio(), 100 * allocator.offHeap().ownershipRatio()); + } + +- public 
MemtableUnfilteredPartitionIterator makePartitionIterator(final ColumnFilter columnFilter, final DataRange dataRange, final boolean isForThrift) ++ public MemtableUnfilteredPartitionIterator makePartitionIterator(final ColumnFilter columnFilter, final DataRange dataRange) + { + AbstractBounds keyRange = dataRange.keyRange(); + +@@ -344,7 +344,7 @@ public class Memtable implements Comparable + + final Iterator> iter = subMap.entrySet().iterator(); + +- return new MemtableUnfilteredPartitionIterator(cfs, iter, isForThrift, minLocalDeletionTime, columnFilter, dataRange); ++ return new MemtableUnfilteredPartitionIterator(cfs, iter, minLocalDeletionTime, columnFilter, dataRange); + } + + private int findMinLocalDeletionTime(Iterator> iterator) +@@ -520,26 +520,19 @@ public class Memtable implements Comparable + { + private final ColumnFamilyStore cfs; + private final Iterator> iter; +- private final boolean isForThrift; + private final int minLocalDeletionTime; + private final ColumnFilter columnFilter; + private final DataRange dataRange; + +- public MemtableUnfilteredPartitionIterator(ColumnFamilyStore cfs, Iterator> iter, boolean isForThrift, int minLocalDeletionTime, ColumnFilter columnFilter, DataRange dataRange) ++ public MemtableUnfilteredPartitionIterator(ColumnFamilyStore cfs, Iterator> iter, int minLocalDeletionTime, ColumnFilter columnFilter, DataRange dataRange) + { + this.cfs = cfs; + this.iter = iter; +- this.isForThrift = isForThrift; + this.minLocalDeletionTime = minLocalDeletionTime; + this.columnFilter = columnFilter; + this.dataRange = dataRange; + } + +- public boolean isForThrift() +- { +- return isForThrift; +- } +- + public int getMinLocalDeletionTime() + { + return minLocalDeletionTime; +diff --git a/src/java/org/apache/cassandra/db/PartitionRangeReadCommand.java b/src/java/org/apache/cassandra/db/PartitionRangeReadCommand.java +index 9517503..0a73798 100644 +--- a/src/java/org/apache/cassandra/db/PartitionRangeReadCommand.java ++++ 
b/src/java/org/apache/cassandra/db/PartitionRangeReadCommand.java +@@ -45,7 +45,6 @@ import org.apache.cassandra.schema.IndexMetadata; + import org.apache.cassandra.service.ClientState; + import org.apache.cassandra.service.StorageProxy; + import org.apache.cassandra.service.pager.*; +-import org.apache.cassandra.thrift.ThriftResultsMerger; + import org.apache.cassandra.tracing.Tracing; + import org.apache.cassandra.utils.FBUtilities; + +@@ -61,7 +60,6 @@ public class PartitionRangeReadCommand extends ReadCommand + + public PartitionRangeReadCommand(boolean isDigest, + int digestVersion, +- boolean isForThrift, + CFMetaData metadata, + int nowInSec, + ColumnFilter columnFilter, +@@ -70,7 +68,7 @@ public class PartitionRangeReadCommand extends ReadCommand + DataRange dataRange, + Optional index) + { +- super(Kind.PARTITION_RANGE, isDigest, digestVersion, isForThrift, metadata, nowInSec, columnFilter, rowFilter, limits); ++ super(Kind.PARTITION_RANGE, isDigest, digestVersion, metadata, nowInSec, columnFilter, rowFilter, limits); + this.dataRange = dataRange; + this.index = index; + } +@@ -83,7 +81,7 @@ public class PartitionRangeReadCommand extends ReadCommand + DataRange dataRange, + Optional index) + { +- this(false, 0, false, metadata, nowInSec, columnFilter, rowFilter, limits, dataRange, index); ++ this(false, 0, metadata, nowInSec, columnFilter, rowFilter, limits, dataRange, index); + } + + /** +@@ -122,12 +120,12 @@ public class PartitionRangeReadCommand extends ReadCommand + + public PartitionRangeReadCommand forSubRange(AbstractBounds range) + { +- return new PartitionRangeReadCommand(isDigestQuery(), digestVersion(), isForThrift(), metadata(), nowInSec(), columnFilter(), rowFilter(), limits(), dataRange().forSubRange(range), index); ++ return new PartitionRangeReadCommand(isDigestQuery(), digestVersion(), metadata(), nowInSec(), columnFilter(), rowFilter(), limits(), dataRange().forSubRange(range), index); + } + + public PartitionRangeReadCommand copy() + { 
+- return new PartitionRangeReadCommand(isDigestQuery(), digestVersion(), isForThrift(), metadata(), nowInSec(), columnFilter(), rowFilter(), limits(), dataRange(), index); ++ return new PartitionRangeReadCommand(isDigestQuery(), digestVersion(), metadata(), nowInSec(), columnFilter(), rowFilter(), limits(), dataRange(), index); + } + + public PartitionRangeReadCommand withUpdatedLimit(DataLimits newLimits) +@@ -186,16 +184,16 @@ public class PartitionRangeReadCommand extends ReadCommand + for (Memtable memtable : view.memtables) + { + @SuppressWarnings("resource") // We close on exception and on closing the result returned by this method +- Memtable.MemtableUnfilteredPartitionIterator iter = memtable.makePartitionIterator(columnFilter(), dataRange(), isForThrift()); ++ Memtable.MemtableUnfilteredPartitionIterator iter = memtable.makePartitionIterator(columnFilter(), dataRange()); + oldestUnrepairedTombstone = Math.min(oldestUnrepairedTombstone, iter.getMinLocalDeletionTime()); +- iterators.add(isForThrift() ? ThriftResultsMerger.maybeWrap(iter, metadata(), nowInSec()) : iter); ++ iterators.add(iter); + } + + for (SSTableReader sstable : view.sstables) + { + @SuppressWarnings("resource") // We close on exception and on closing the result returned by this method +- UnfilteredPartitionIterator iter = sstable.getScanner(columnFilter(), dataRange(), isForThrift()); +- iterators.add(isForThrift() ? 
ThriftResultsMerger.maybeWrap(iter, metadata(), nowInSec()) : iter); ++ UnfilteredPartitionIterator iter = sstable.getScanner(columnFilter(), dataRange()); ++ iterators.add(iter); + if (!sstable.isRepaired()) + oldestUnrepairedTombstone = Math.min(oldestUnrepairedTombstone, sstable.getMinLocalDeletionTime()); + } +@@ -312,11 +310,11 @@ public class PartitionRangeReadCommand extends ReadCommand + + private static class Deserializer extends SelectionDeserializer + { +- public ReadCommand deserialize(DataInputPlus in, int version, boolean isDigest, int digestVersion, boolean isForThrift, CFMetaData metadata, int nowInSec, ColumnFilter columnFilter, RowFilter rowFilter, DataLimits limits, Optional index) ++ public ReadCommand deserialize(DataInputPlus in, int version, boolean isDigest, int digestVersion, CFMetaData metadata, int nowInSec, ColumnFilter columnFilter, RowFilter rowFilter, DataLimits limits, Optional index) + throws IOException + { + DataRange range = DataRange.serializer.deserialize(in, version, metadata); +- return new PartitionRangeReadCommand(isDigest, digestVersion, isForThrift, metadata, nowInSec, columnFilter, rowFilter, limits, range, index); ++ return new PartitionRangeReadCommand(isDigest, digestVersion, metadata, nowInSec, columnFilter, rowFilter, limits, range, index); + } + } + } +diff --git a/src/java/org/apache/cassandra/db/ReadCommand.java b/src/java/org/apache/cassandra/db/ReadCommand.java +index 9542703..e14b4fe 100644 +--- a/src/java/org/apache/cassandra/db/ReadCommand.java ++++ b/src/java/org/apache/cassandra/db/ReadCommand.java +@@ -63,43 +63,30 @@ public abstract class ReadCommand extends MonitorableImpl implements ReadQuery + protected static final Logger logger = LoggerFactory.getLogger(ReadCommand.class); + public static final IVersionedSerializer serializer = new Serializer(); + +- // For READ verb: will either dispatch on 'serializer' for 3.0 or 'legacyReadCommandSerializer' for earlier version. 
+- // Can be removed (and replaced by 'serializer') once we drop pre-3.0 backward compatibility. + public static final IVersionedSerializer readSerializer = new ForwardingVersionedSerializer() + { + protected IVersionedSerializer delegate(int version) + { +- return version < MessagingService.VERSION_30 +- ? legacyReadCommandSerializer : serializer; ++ return serializer; + } + }; + +- // For RANGE_SLICE verb: will either dispatch on 'serializer' for 3.0 or 'legacyRangeSliceCommandSerializer' for earlier version. +- // Can be removed (and replaced by 'serializer') once we drop pre-3.0 backward compatibility. + public static final IVersionedSerializer rangeSliceSerializer = new ForwardingVersionedSerializer() + { + protected IVersionedSerializer delegate(int version) + { +- return version < MessagingService.VERSION_30 +- ? legacyRangeSliceCommandSerializer : serializer; ++ return serializer; + } + }; + +- // For PAGED_RANGE verb: will either dispatch on 'serializer' for 3.0 or 'legacyPagedRangeCommandSerializer' for earlier version. +- // Can be removed (and replaced by 'serializer') once we drop pre-3.0 backward compatibility. + public static final IVersionedSerializer pagedRangeSerializer = new ForwardingVersionedSerializer() + { + protected IVersionedSerializer delegate(int version) + { +- return version < MessagingService.VERSION_30 +- ? 
legacyPagedRangeCommandSerializer : serializer; ++ return serializer; + } + }; + +- public static final IVersionedSerializer legacyRangeSliceCommandSerializer = new LegacyRangeSliceCommandSerializer(); +- public static final IVersionedSerializer legacyPagedRangeCommandSerializer = new LegacyPagedRangeCommandSerializer(); +- public static final IVersionedSerializer legacyReadCommandSerializer = new LegacyReadCommandSerializer(); +- + private final Kind kind; + private final CFMetaData metadata; + private final int nowInSec; +@@ -121,11 +108,10 @@ public abstract class ReadCommand extends MonitorableImpl implements ReadQuery + private boolean isDigestQuery; + // if a digest query, the version for which the digest is expected. Ignored if not a digest. + private int digestVersion; +- private final boolean isForThrift; + + protected static abstract class SelectionDeserializer + { +- public abstract ReadCommand deserialize(DataInputPlus in, int version, boolean isDigest, int digestVersion, boolean isForThrift, CFMetaData metadata, int nowInSec, ColumnFilter columnFilter, RowFilter rowFilter, DataLimits limits, Optional index) throws IOException; ++ public abstract ReadCommand deserialize(DataInputPlus in, int version, boolean isDigest, int digestVersion, CFMetaData metadata, int nowInSec, ColumnFilter columnFilter, RowFilter rowFilter, DataLimits limits, Optional index) throws IOException; + } + + protected enum Kind +@@ -144,7 +130,6 @@ public abstract class ReadCommand extends MonitorableImpl implements ReadQuery + protected ReadCommand(Kind kind, + boolean isDigestQuery, + int digestVersion, +- boolean isForThrift, + CFMetaData metadata, + int nowInSec, + ColumnFilter columnFilter, +@@ -154,7 +139,6 @@ public abstract class ReadCommand extends MonitorableImpl implements ReadQuery + this.kind = kind; + this.isDigestQuery = isDigestQuery; + this.digestVersion = digestVersion; +- this.isForThrift = isForThrift; + this.metadata = metadata; + this.nowInSec = nowInSec; + 
this.columnFilter = columnFilter; +@@ -283,16 +267,6 @@ public abstract class ReadCommand extends MonitorableImpl implements ReadQuery + } + + /** +- * Whether this query is for thrift or not. +- * +- * @return whether this query is for thrift. +- */ +- public boolean isForThrift() +- { +- return isForThrift; +- } +- +- /** + * The clustering index filter this command to use for the provided key. + *

+ * Note that that method should only be called on a key actually queried by this command +@@ -580,12 +554,11 @@ public abstract class ReadCommand extends MonitorableImpl implements ReadQuery + // are to some extend an artefact of compaction lagging behind and hence counting them is somewhat unintuitive). + protected UnfilteredPartitionIterator withoutPurgeableTombstones(UnfilteredPartitionIterator iterator, ColumnFamilyStore cfs) + { +- final boolean isForThrift = iterator.isForThrift(); + class WithoutPurgeableTombstones extends PurgeFunction + { + public WithoutPurgeableTombstones() + { +- super(isForThrift, nowInSec(), cfs.gcBefore(nowInSec()), oldestUnrepairedTombstone(), cfs.getCompactionStrategyManager().onlyPurgeRepairedTombstones()); ++ super(nowInSec(), cfs.gcBefore(nowInSec()), oldestUnrepairedTombstone(), cfs.getCompactionStrategyManager().onlyPurgeRepairedTombstones()); + } + + protected long getMaxPurgeableTimestamp() +@@ -635,11 +608,11 @@ public abstract class ReadCommand extends MonitorableImpl implements ReadQuery + return (flags & 0x01) != 0; + } + +- private static int thriftFlag(boolean isForThrift) +- { +- return isForThrift ? 0x02 : 0; +- } +- ++ // We don't set this flag anymore, but still look if we receive a ++ // command with it set in case someone is using thrift a mixed 3.0/4.0+ ++ // cluster (which is unsupported). 
This is also a reminder for not ++ // re-using this flag until we drop 3.0/3.X compatibility (since it's ++ // used by these release for thrift and would thus confuse things) + private static boolean isForThrift(int flags) + { + return (flags & 0x02) != 0; +@@ -660,7 +633,7 @@ public abstract class ReadCommand extends MonitorableImpl implements ReadQuery + assert version >= MessagingService.VERSION_30; + + out.writeByte(command.kind.ordinal()); +- out.writeByte(digestFlag(command.isDigestQuery()) | thriftFlag(command.isForThrift()) | indexFlag(command.index.isPresent())); ++ out.writeByte(digestFlag(command.isDigestQuery()) | indexFlag(command.index.isPresent())); + if (command.isDigestQuery()) + out.writeUnsignedVInt(command.digestVersion()); + CFMetaData.serializer.serialize(command.metadata(), out, version); +@@ -681,7 +654,14 @@ public abstract class ReadCommand extends MonitorableImpl implements ReadQuery + Kind kind = Kind.values()[in.readByte()]; + int flags = in.readByte(); + boolean isDigest = isDigest(flags); +- boolean isForThrift = isForThrift(flags); ++ // Shouldn't happen or it's a user error (see comment above) but ++ // better complain loudly than doing the wrong thing. ++ if (isForThrift(flags)) ++ throw new IllegalStateException("Received a command with the thrift flag set. " ++ + "This means thrift is in use in a mixed 3.0/3.X and 4.0+ cluster, " ++ + "which is unsupported. Make sure to stop using thrift before " ++ + "upgrading to 4.0"); ++ + boolean hasIndex = hasIndex(flags); + int digestVersion = isDigest ? (int)in.readUnsignedVInt() : 0; + CFMetaData metadata = CFMetaData.serializer.deserialize(in, version); +@@ -693,7 +673,7 @@ public abstract class ReadCommand extends MonitorableImpl implements ReadQuery + ? 
deserializeIndexMetadata(in, version, metadata) + : Optional.empty(); + +- return kind.selectionDeserializer.deserialize(in, version, isDigest, digestVersion, isForThrift, metadata, nowInSec, columnFilter, rowFilter, limits, index); ++ return kind.selectionDeserializer.deserialize(in, version, isDigest, digestVersion, metadata, nowInSec, columnFilter, rowFilter, limits, index); + } + + private Optional deserializeIndexMetadata(DataInputPlus in, int version, CFMetaData cfm) throws IOException +@@ -729,1015 +709,4 @@ public abstract class ReadCommand extends MonitorableImpl implements ReadQuery + + command.indexSerializedSize(version); + } + } +- +- private enum LegacyType +- { +- GET_BY_NAMES((byte)1), +- GET_SLICES((byte)2); +- +- public final byte serializedValue; +- +- LegacyType(byte b) +- { +- this.serializedValue = b; +- } +- +- public static LegacyType fromPartitionFilterKind(ClusteringIndexFilter.Kind kind) +- { +- return kind == ClusteringIndexFilter.Kind.SLICE +- ? GET_SLICES +- : GET_BY_NAMES; +- } +- +- public static LegacyType fromSerializedValue(byte b) +- { +- return b == 1 ? GET_BY_NAMES : GET_SLICES; +- } +- } +- +- /** +- * Serializer for pre-3.0 RangeSliceCommands. 
+- */ +- private static class LegacyRangeSliceCommandSerializer implements IVersionedSerializer +- { +- public void serialize(ReadCommand command, DataOutputPlus out, int version) throws IOException +- { +- assert version < MessagingService.VERSION_30; +- +- PartitionRangeReadCommand rangeCommand = (PartitionRangeReadCommand) command; +- assert !rangeCommand.dataRange().isPaging(); +- +- // convert pre-3.0 incompatible names filters to slice filters +- rangeCommand = maybeConvertNamesToSlice(rangeCommand); +- +- CFMetaData metadata = rangeCommand.metadata(); +- +- out.writeUTF(metadata.ksName); +- out.writeUTF(metadata.cfName); +- out.writeLong(rangeCommand.nowInSec() * 1000L); // convert from seconds to millis +- +- // begin DiskAtomFilterSerializer.serialize() +- if (rangeCommand.isNamesQuery()) +- { +- out.writeByte(1); // 0 for slices, 1 for names +- ClusteringIndexNamesFilter filter = (ClusteringIndexNamesFilter) rangeCommand.dataRange().clusteringIndexFilter; +- LegacyReadCommandSerializer.serializeNamesFilter(rangeCommand, filter, out); +- } +- else +- { +- out.writeByte(0); // 0 for slices, 1 for names +- +- // slice filter serialization +- ClusteringIndexSliceFilter filter = (ClusteringIndexSliceFilter) rangeCommand.dataRange().clusteringIndexFilter; +- +- boolean makeStaticSlice = !rangeCommand.columnFilter().fetchedColumns().statics.isEmpty() && !filter.requestedSlices().selects(Clustering.STATIC_CLUSTERING); +- LegacyReadCommandSerializer.serializeSlices(out, filter.requestedSlices(), filter.isReversed(), makeStaticSlice, metadata); +- +- out.writeBoolean(filter.isReversed()); +- +- // limit +- DataLimits limits = rangeCommand.limits(); +- if (limits.isDistinct()) +- out.writeInt(1); +- else +- out.writeInt(LegacyReadCommandSerializer.updateLimitForQuery(rangeCommand.limits().count(), filter.requestedSlices())); +- +- int compositesToGroup; +- boolean selectsStatics = !rangeCommand.columnFilter().fetchedColumns().statics.isEmpty() || 
filter.requestedSlices().selects(Clustering.STATIC_CLUSTERING); +- if (limits.kind() == DataLimits.Kind.THRIFT_LIMIT) +- compositesToGroup = -1; +- else if (limits.isDistinct() && !selectsStatics) +- compositesToGroup = -2; // for DISTINCT queries (CASSANDRA-8490) +- else +- compositesToGroup = metadata.isDense() ? -1 : metadata.clusteringColumns().size(); +- +- out.writeInt(compositesToGroup); +- } +- +- serializeRowFilter(out, rangeCommand.rowFilter()); +- AbstractBounds.rowPositionSerializer.serialize(rangeCommand.dataRange().keyRange(), out, version); +- +- // maxResults +- out.writeInt(rangeCommand.limits().count()); +- +- // countCQL3Rows +- if (rangeCommand.isForThrift() || rangeCommand.limits().perPartitionCount() == 1) // if for Thrift or DISTINCT +- out.writeBoolean(false); +- else +- out.writeBoolean(true); +- +- // isPaging +- out.writeBoolean(false); +- } +- +- public ReadCommand deserialize(DataInputPlus in, int version) throws IOException +- { +- assert version < MessagingService.VERSION_30; +- +- String keyspace = in.readUTF(); +- String columnFamily = in.readUTF(); +- +- CFMetaData metadata = Schema.instance.getCFMetaData(keyspace, columnFamily); +- if (metadata == null) +- { +- String message = String.format("Got legacy range command for nonexistent table %s.%s.", keyspace, columnFamily); +- throw new UnknownColumnFamilyException(message, null); +- } +- +- int nowInSec = (int) (in.readLong() / 1000); // convert from millis to seconds +- +- ClusteringIndexFilter filter; +- ColumnFilter selection; +- int compositesToGroup = 0; +- int perPartitionLimit = -1; +- byte readType = in.readByte(); // 0 for slices, 1 for names +- if (readType == 1) +- { +- Pair selectionAndFilter = LegacyReadCommandSerializer.deserializeNamesSelectionAndFilter(in, metadata); +- selection = selectionAndFilter.left; +- filter = selectionAndFilter.right; +- } +- else +- { +- Pair p = LegacyReadCommandSerializer.deserializeSlicePartitionFilter(in, metadata); +- filter = p.left; 
+- perPartitionLimit = in.readInt(); +- compositesToGroup = in.readInt(); +- selection = getColumnSelectionForSlice(p.right, compositesToGroup, metadata); +- } +- +- RowFilter rowFilter = deserializeRowFilter(in, metadata); +- +- AbstractBounds keyRange = AbstractBounds.rowPositionSerializer.deserialize(in, metadata.partitioner, version); +- int maxResults = in.readInt(); +- +- boolean countCQL3Rows = in.readBoolean(); // countCQL3Rows (not needed) +- in.readBoolean(); // isPaging (not needed) +- +- boolean selectsStatics = (!selection.fetchedColumns().statics.isEmpty() || filter.selects(Clustering.STATIC_CLUSTERING)); +- // We have 2 types of DISTINCT queries: ones on only the partition key, and ones on the partition key and static columns. For the former, +- // we can easily detect the case because compositeToGroup is -2 and that's the only case it can be that. The latter one is slightly less +- // direct, but we know that on 2.1/2.2 queries, DISTINCT queries are the only CQL queries that have countCQL3Rows to false so we use +- // that fact. 
+- boolean isDistinct = compositesToGroup == -2 || (compositesToGroup != -1 && !countCQL3Rows); +- DataLimits limits; +- if (isDistinct) +- limits = DataLimits.distinctLimits(maxResults); +- else if (compositesToGroup == -1) +- limits = DataLimits.thriftLimits(maxResults, perPartitionLimit); +- else +- limits = DataLimits.cqlLimits(maxResults); +- +- return new PartitionRangeReadCommand(false, 0, true, metadata, nowInSec, selection, rowFilter, limits, new DataRange(keyRange, filter), Optional.empty()); +- } +- +- static void serializeRowFilter(DataOutputPlus out, RowFilter rowFilter) throws IOException +- { +- ArrayList indexExpressions = Lists.newArrayList(rowFilter.iterator()); +- out.writeInt(indexExpressions.size()); +- for (RowFilter.Expression expression : indexExpressions) +- { +- ByteBufferUtil.writeWithShortLength(expression.column().name.bytes, out); +- expression.operator().writeTo(out); +- ByteBufferUtil.writeWithShortLength(expression.getIndexValue(), out); +- } +- } +- +- static RowFilter deserializeRowFilter(DataInputPlus in, CFMetaData metadata) throws IOException +- { +- int numRowFilters = in.readInt(); +- if (numRowFilters == 0) +- return RowFilter.NONE; +- +- RowFilter rowFilter = RowFilter.create(numRowFilters); +- for (int i = 0; i < numRowFilters; i++) +- { +- ByteBuffer columnName = ByteBufferUtil.readWithShortLength(in); +- ColumnDefinition column = metadata.getColumnDefinition(columnName); +- Operator op = Operator.readFrom(in); +- ByteBuffer indexValue = ByteBufferUtil.readWithShortLength(in); +- rowFilter.add(column, op, indexValue); +- } +- return rowFilter; +- } +- +- static long serializedRowFilterSize(RowFilter rowFilter) +- { +- long size = TypeSizes.sizeof(0); // rowFilterCount +- for (RowFilter.Expression expression : rowFilter) +- { +- size += ByteBufferUtil.serializedSizeWithShortLength(expression.column().name.bytes); +- size += TypeSizes.sizeof(0); // operator int value +- size += 
ByteBufferUtil.serializedSizeWithShortLength(expression.getIndexValue()); +- } +- return size; +- } +- +- public long serializedSize(ReadCommand command, int version) +- { +- assert version < MessagingService.VERSION_30; +- assert command.kind == Kind.PARTITION_RANGE; +- +- PartitionRangeReadCommand rangeCommand = (PartitionRangeReadCommand) command; +- rangeCommand = maybeConvertNamesToSlice(rangeCommand); +- CFMetaData metadata = rangeCommand.metadata(); +- +- long size = TypeSizes.sizeof(metadata.ksName); +- size += TypeSizes.sizeof(metadata.cfName); +- size += TypeSizes.sizeof((long) rangeCommand.nowInSec()); +- +- size += 1; // single byte flag: 0 for slices, 1 for names +- if (rangeCommand.isNamesQuery()) +- { +- PartitionColumns columns = rangeCommand.columnFilter().fetchedColumns(); +- ClusteringIndexNamesFilter filter = (ClusteringIndexNamesFilter) rangeCommand.dataRange().clusteringIndexFilter; +- size += LegacyReadCommandSerializer.serializedNamesFilterSize(filter, metadata, columns); +- } +- else +- { +- ClusteringIndexSliceFilter filter = (ClusteringIndexSliceFilter) rangeCommand.dataRange().clusteringIndexFilter; +- boolean makeStaticSlice = !rangeCommand.columnFilter().fetchedColumns().statics.isEmpty() && !filter.requestedSlices().selects(Clustering.STATIC_CLUSTERING); +- size += LegacyReadCommandSerializer.serializedSlicesSize(filter.requestedSlices(), makeStaticSlice, metadata); +- size += TypeSizes.sizeof(filter.isReversed()); +- size += TypeSizes.sizeof(rangeCommand.limits().perPartitionCount()); +- size += TypeSizes.sizeof(0); // compositesToGroup +- } +- +- if (rangeCommand.rowFilter().equals(RowFilter.NONE)) +- { +- size += TypeSizes.sizeof(0); +- } +- else +- { +- ArrayList indexExpressions = Lists.newArrayList(rangeCommand.rowFilter().iterator()); +- size += TypeSizes.sizeof(indexExpressions.size()); +- for (RowFilter.Expression expression : indexExpressions) +- { +- size += 
ByteBufferUtil.serializedSizeWithShortLength(expression.column().name.bytes); +- size += TypeSizes.sizeof(expression.operator().ordinal()); +- size += ByteBufferUtil.serializedSizeWithShortLength(expression.getIndexValue()); +- } +- } +- +- size += AbstractBounds.rowPositionSerializer.serializedSize(rangeCommand.dataRange().keyRange(), version); +- size += TypeSizes.sizeof(rangeCommand.limits().count()); +- size += TypeSizes.sizeof(!rangeCommand.isForThrift()); +- return size + TypeSizes.sizeof(rangeCommand.dataRange().isPaging()); +- } +- +- static PartitionRangeReadCommand maybeConvertNamesToSlice(PartitionRangeReadCommand command) +- { +- if (!command.dataRange().isNamesQuery()) +- return command; +- +- CFMetaData metadata = command.metadata(); +- if (!LegacyReadCommandSerializer.shouldConvertNamesToSlice(metadata, command.columnFilter().fetchedColumns())) +- return command; +- +- ClusteringIndexNamesFilter filter = (ClusteringIndexNamesFilter) command.dataRange().clusteringIndexFilter; +- ClusteringIndexSliceFilter sliceFilter = LegacyReadCommandSerializer.convertNamesFilterToSliceFilter(filter, metadata); +- DataRange newRange = new DataRange(command.dataRange().keyRange(), sliceFilter); +- return new PartitionRangeReadCommand( +- command.isDigestQuery(), command.digestVersion(), command.isForThrift(), metadata, command.nowInSec(), +- command.columnFilter(), command.rowFilter(), command.limits(), newRange, Optional.empty()); +- } +- +- static ColumnFilter getColumnSelectionForSlice(boolean selectsStatics, int compositesToGroup, CFMetaData metadata) +- { +- // A value of -2 indicates this is a DISTINCT query that doesn't select static columns, only partition keys. +- // In that case, we'll basically be querying the first row of the partition, but we must make sure we include +- // all columns so we get at least one cell if there is a live row as it would confuse pre-3.0 nodes otherwise. 
+- if (compositesToGroup == -2) +- return ColumnFilter.all(metadata); +- +- // if a slice query from a pre-3.0 node doesn't cover statics, we shouldn't select them at all +- PartitionColumns columns = selectsStatics +- ? metadata.partitionColumns() +- : metadata.partitionColumns().withoutStatics(); +- return ColumnFilter.selectionBuilder().addAll(columns).build(); +- } +- } +- +- /** +- * Serializer for pre-3.0 PagedRangeCommands. +- */ +- private static class LegacyPagedRangeCommandSerializer implements IVersionedSerializer +- { +- public void serialize(ReadCommand command, DataOutputPlus out, int version) throws IOException +- { +- assert version < MessagingService.VERSION_30; +- +- PartitionRangeReadCommand rangeCommand = (PartitionRangeReadCommand) command; +- assert rangeCommand.dataRange().isPaging(); +- +- CFMetaData metadata = rangeCommand.metadata(); +- +- out.writeUTF(metadata.ksName); +- out.writeUTF(metadata.cfName); +- out.writeLong(rangeCommand.nowInSec() * 1000L); // convert from seconds to millis +- +- AbstractBounds.rowPositionSerializer.serialize(rangeCommand.dataRange().keyRange(), out, version); +- +- // pre-3.0 nodes don't accept names filters for paged range commands +- ClusteringIndexSliceFilter filter; +- if (rangeCommand.dataRange().clusteringIndexFilter.kind() == ClusteringIndexFilter.Kind.NAMES) +- filter = LegacyReadCommandSerializer.convertNamesFilterToSliceFilter((ClusteringIndexNamesFilter) rangeCommand.dataRange().clusteringIndexFilter, metadata); +- else +- filter = (ClusteringIndexSliceFilter) rangeCommand.dataRange().clusteringIndexFilter; +- +- // slice filter +- boolean makeStaticSlice = !rangeCommand.columnFilter().fetchedColumns().statics.isEmpty() && !filter.requestedSlices().selects(Clustering.STATIC_CLUSTERING); +- LegacyReadCommandSerializer.serializeSlices(out, filter.requestedSlices(), filter.isReversed(), makeStaticSlice, metadata); +- out.writeBoolean(filter.isReversed()); +- +- // slice filter's count +- 
DataLimits.Kind kind = rangeCommand.limits().kind(); +- boolean isDistinct = (kind == DataLimits.Kind.CQL_LIMIT || kind == DataLimits.Kind.CQL_PAGING_LIMIT) && rangeCommand.limits().perPartitionCount() == 1; +- if (isDistinct) +- out.writeInt(1); +- else +- out.writeInt(LegacyReadCommandSerializer.updateLimitForQuery(rangeCommand.limits().perPartitionCount(), filter.requestedSlices())); +- +- // compositesToGroup +- boolean selectsStatics = !rangeCommand.columnFilter().fetchedColumns().statics.isEmpty() || filter.requestedSlices().selects(Clustering.STATIC_CLUSTERING); +- int compositesToGroup; +- if (kind == DataLimits.Kind.THRIFT_LIMIT) +- compositesToGroup = -1; +- else if (isDistinct && !selectsStatics) +- compositesToGroup = -2; // for DISTINCT queries (CASSANDRA-8490) +- else +- compositesToGroup = metadata.isDense() ? -1 : metadata.clusteringColumns().size(); +- +- out.writeInt(compositesToGroup); +- +- // command-level "start" and "stop" composites. The start is the last-returned cell name if there is one, +- // otherwise it's the same as the slice filter's start. The stop appears to always be the same as the +- // slice filter's stop. +- DataRange.Paging pagingRange = (DataRange.Paging) rangeCommand.dataRange(); +- Clustering lastReturned = pagingRange.getLastReturned(); +- ClusteringBound newStart = ClusteringBound.inclusiveStartOf(lastReturned); +- Slice lastSlice = filter.requestedSlices().get(filter.requestedSlices().size() - 1); +- ByteBufferUtil.writeWithShortLength(LegacyLayout.encodeBound(metadata, newStart, true), out); +- ByteBufferUtil.writeWithShortLength(LegacyLayout.encodeClustering(metadata, lastSlice.end().clustering()), out); +- +- LegacyRangeSliceCommandSerializer.serializeRowFilter(out, rangeCommand.rowFilter()); +- +- // command-level limit +- // Pre-3.0 we would always request one more row than we actually needed and the command-level "start" would +- // be the last-returned cell name, so the response would always include it. 
+- int maxResults = rangeCommand.limits().count() + 1; +- out.writeInt(maxResults); +- +- // countCQL3Rows +- if (rangeCommand.isForThrift() || rangeCommand.limits().perPartitionCount() == 1) // for Thrift or DISTINCT +- out.writeBoolean(false); +- else +- out.writeBoolean(true); +- } +- +- public ReadCommand deserialize(DataInputPlus in, int version) throws IOException +- { +- assert version < MessagingService.VERSION_30; +- +- String keyspace = in.readUTF(); +- String columnFamily = in.readUTF(); +- +- CFMetaData metadata = Schema.instance.getCFMetaData(keyspace, columnFamily); +- if (metadata == null) +- { +- String message = String.format("Got legacy paged range command for nonexistent table %s.%s.", keyspace, columnFamily); +- throw new UnknownColumnFamilyException(message, null); +- } +- +- int nowInSec = (int) (in.readLong() / 1000); // convert from millis to seconds +- AbstractBounds keyRange = AbstractBounds.rowPositionSerializer.deserialize(in, metadata.partitioner, version); +- +- Pair p = LegacyReadCommandSerializer.deserializeSlicePartitionFilter(in, metadata); +- ClusteringIndexSliceFilter filter = p.left; +- boolean selectsStatics = p.right; +- +- int perPartitionLimit = in.readInt(); +- int compositesToGroup = in.readInt(); +- +- // command-level Composite "start" and "stop" +- LegacyLayout.LegacyBound startBound = LegacyLayout.decodeBound(metadata, ByteBufferUtil.readWithShortLength(in), true); +- +- ByteBufferUtil.readWithShortLength(in); // the composite "stop", which isn't actually needed +- +- ColumnFilter selection = LegacyRangeSliceCommandSerializer.getColumnSelectionForSlice(selectsStatics, compositesToGroup, metadata); +- +- RowFilter rowFilter = LegacyRangeSliceCommandSerializer.deserializeRowFilter(in, metadata); +- int maxResults = in.readInt(); +- boolean countCQL3Rows = in.readBoolean(); +- +- // We have 2 types of DISTINCT queries: ones on only the partition key, and ones on the partition key and static columns. 
For the former, +- // we can easily detect the case because compositeToGroup is -2 and that's the only case it can be that. The latter one is slightly less +- // direct, but we know that on 2.1/2.2 queries, DISTINCT queries are the only CQL queries that have countCQL3Rows to false so we use +- // that fact. +- boolean isDistinct = compositesToGroup == -2 || (compositesToGroup != -1 && !countCQL3Rows); +- DataLimits limits; +- if (isDistinct) +- limits = DataLimits.distinctLimits(maxResults); +- else +- limits = DataLimits.cqlLimits(maxResults); +- +- limits = limits.forPaging(maxResults); +- +- // The pagedRangeCommand is used in pre-3.0 for both the first page and the following ones. On the first page, the startBound will be +- // the start of the overall slice and will not be a proper Clustering. So detect that case and just return a non-paging DataRange, which +- // is what 3.0 does. +- DataRange dataRange = new DataRange(keyRange, filter); +- Slices slices = filter.requestedSlices(); +- if (!isDistinct && startBound != LegacyLayout.LegacyBound.BOTTOM && !startBound.bound.equals(slices.get(0).start())) +- { +- // pre-3.0 nodes normally expect pages to include the last cell from the previous page, but they handle it +- // missing without any problems, so we can safely always set "inclusive" to false in the data range +- dataRange = dataRange.forPaging(keyRange, metadata.comparator, startBound.getAsClustering(metadata), false); +- } +- return new PartitionRangeReadCommand(false, 0, true, metadata, nowInSec, selection, rowFilter, limits, dataRange, Optional.empty()); +- } +- +- public long serializedSize(ReadCommand command, int version) +- { +- assert version < MessagingService.VERSION_30; +- assert command.kind == Kind.PARTITION_RANGE; +- +- PartitionRangeReadCommand rangeCommand = (PartitionRangeReadCommand) command; +- CFMetaData metadata = rangeCommand.metadata(); +- assert rangeCommand.dataRange().isPaging(); +- +- long size = 
TypeSizes.sizeof(metadata.ksName); +- size += TypeSizes.sizeof(metadata.cfName); +- size += TypeSizes.sizeof((long) rangeCommand.nowInSec()); +- +- size += AbstractBounds.rowPositionSerializer.serializedSize(rangeCommand.dataRange().keyRange(), version); +- +- // pre-3.0 nodes only accept slice filters for paged range commands +- ClusteringIndexSliceFilter filter; +- if (rangeCommand.dataRange().clusteringIndexFilter.kind() == ClusteringIndexFilter.Kind.NAMES) +- filter = LegacyReadCommandSerializer.convertNamesFilterToSliceFilter((ClusteringIndexNamesFilter) rangeCommand.dataRange().clusteringIndexFilter, metadata); +- else +- filter = (ClusteringIndexSliceFilter) rangeCommand.dataRange().clusteringIndexFilter; +- +- // slice filter +- boolean makeStaticSlice = !rangeCommand.columnFilter().fetchedColumns().statics.isEmpty() && !filter.requestedSlices().selects(Clustering.STATIC_CLUSTERING); +- size += LegacyReadCommandSerializer.serializedSlicesSize(filter.requestedSlices(), makeStaticSlice, metadata); +- size += TypeSizes.sizeof(filter.isReversed()); +- +- // slice filter's count +- size += TypeSizes.sizeof(rangeCommand.limits().perPartitionCount()); +- +- // compositesToGroup +- size += TypeSizes.sizeof(0); +- +- // command-level Composite "start" and "stop" +- DataRange.Paging pagingRange = (DataRange.Paging) rangeCommand.dataRange(); +- Clustering lastReturned = pagingRange.getLastReturned(); +- Slice lastSlice = filter.requestedSlices().get(filter.requestedSlices().size() - 1); +- size += ByteBufferUtil.serializedSizeWithShortLength(LegacyLayout.encodeClustering(metadata, lastReturned)); +- size += ByteBufferUtil.serializedSizeWithShortLength(LegacyLayout.encodeClustering(metadata, lastSlice.end().clustering())); +- +- size += LegacyRangeSliceCommandSerializer.serializedRowFilterSize(rangeCommand.rowFilter()); +- +- // command-level limit +- size += TypeSizes.sizeof(rangeCommand.limits().count()); +- +- // countCQL3Rows +- return size + 
TypeSizes.sizeof(true); +- } +- } +- +- /** +- * Serializer for pre-3.0 ReadCommands. +- */ +- static class LegacyReadCommandSerializer implements IVersionedSerializer +- { +- public void serialize(ReadCommand command, DataOutputPlus out, int version) throws IOException +- { +- assert version < MessagingService.VERSION_30; +- assert command.kind == Kind.SINGLE_PARTITION; +- +- SinglePartitionReadCommand singleReadCommand = (SinglePartitionReadCommand) command; +- singleReadCommand = maybeConvertNamesToSlice(singleReadCommand); +- +- CFMetaData metadata = singleReadCommand.metadata(); +- +- out.writeByte(LegacyType.fromPartitionFilterKind(singleReadCommand.clusteringIndexFilter().kind()).serializedValue); +- +- out.writeBoolean(singleReadCommand.isDigestQuery()); +- out.writeUTF(metadata.ksName); +- ByteBufferUtil.writeWithShortLength(singleReadCommand.partitionKey().getKey(), out); +- out.writeUTF(metadata.cfName); +- out.writeLong(singleReadCommand.nowInSec() * 1000L); // convert from seconds to millis +- +- if (singleReadCommand.clusteringIndexFilter().kind() == ClusteringIndexFilter.Kind.SLICE) +- serializeSliceCommand(singleReadCommand, out); +- else +- serializeNamesCommand(singleReadCommand, out); +- } +- +- public ReadCommand deserialize(DataInputPlus in, int version) throws IOException +- { +- assert version < MessagingService.VERSION_30; +- LegacyType msgType = LegacyType.fromSerializedValue(in.readByte()); +- +- boolean isDigest = in.readBoolean(); +- String keyspaceName = in.readUTF(); +- ByteBuffer key = ByteBufferUtil.readWithShortLength(in); +- String cfName = in.readUTF(); +- long nowInMillis = in.readLong(); +- int nowInSeconds = (int) (nowInMillis / 1000); // convert from millis to seconds +- CFMetaData metadata = Schema.instance.getCFMetaData(keyspaceName, cfName); +- DecoratedKey dk = metadata.partitioner.decorateKey(key); +- +- switch (msgType) +- { +- case GET_BY_NAMES: +- return deserializeNamesCommand(in, isDigest, metadata, dk, nowInSeconds, 
version); +- case GET_SLICES: +- return deserializeSliceCommand(in, isDigest, metadata, dk, nowInSeconds, version); +- default: +- throw new AssertionError(); +- } +- } +- +- public long serializedSize(ReadCommand command, int version) +- { +- assert version < MessagingService.VERSION_30; +- assert command.kind == Kind.SINGLE_PARTITION; +- SinglePartitionReadCommand singleReadCommand = (SinglePartitionReadCommand) command; +- singleReadCommand = maybeConvertNamesToSlice(singleReadCommand); +- +- int keySize = singleReadCommand.partitionKey().getKey().remaining(); +- +- CFMetaData metadata = singleReadCommand.metadata(); +- +- long size = 1; // message type (single byte) +- size += TypeSizes.sizeof(command.isDigestQuery()); +- size += TypeSizes.sizeof(metadata.ksName); +- size += TypeSizes.sizeof((short) keySize) + keySize; +- size += TypeSizes.sizeof((long) command.nowInSec()); +- +- if (singleReadCommand.clusteringIndexFilter().kind() == ClusteringIndexFilter.Kind.SLICE) +- return size + serializedSliceCommandSize(singleReadCommand); +- else +- return size + serializedNamesCommandSize(singleReadCommand); +- } +- +- private void serializeNamesCommand(SinglePartitionReadCommand command, DataOutputPlus out) throws IOException +- { +- serializeNamesFilter(command, (ClusteringIndexNamesFilter)command.clusteringIndexFilter(), out); +- } +- +- private static void serializeNamesFilter(ReadCommand command, ClusteringIndexNamesFilter filter, DataOutputPlus out) throws IOException +- { +- PartitionColumns columns = command.columnFilter().fetchedColumns(); +- CFMetaData metadata = command.metadata(); +- SortedSet requestedRows = filter.requestedRows(); +- +- if (requestedRows.isEmpty()) +- { +- // only static columns are requested +- out.writeInt(columns.size()); +- for (ColumnDefinition column : columns) +- ByteBufferUtil.writeWithShortLength(column.name.bytes, out); +- } +- else +- { +- out.writeInt(requestedRows.size() * columns.size()); +- for (Clustering clustering : 
requestedRows) +- { +- for (ColumnDefinition column : columns) +- ByteBufferUtil.writeWithShortLength(LegacyLayout.encodeCellName(metadata, clustering, column.name.bytes, null), out); +- } +- } +- +- // countCql3Rows should be true if it's not for Thrift or a DISTINCT query +- if (command.isForThrift() || (command.limits().kind() == DataLimits.Kind.CQL_LIMIT && command.limits().perPartitionCount() == 1)) +- out.writeBoolean(false); // it's compact and not a DISTINCT query +- else +- out.writeBoolean(true); +- } +- +- static long serializedNamesFilterSize(ClusteringIndexNamesFilter filter, CFMetaData metadata, PartitionColumns fetchedColumns) +- { +- SortedSet requestedRows = filter.requestedRows(); +- +- long size = 0; +- if (requestedRows.isEmpty()) +- { +- // only static columns are requested +- size += TypeSizes.sizeof(fetchedColumns.size()); +- for (ColumnDefinition column : fetchedColumns) +- size += ByteBufferUtil.serializedSizeWithShortLength(column.name.bytes); +- } +- else +- { +- size += TypeSizes.sizeof(requestedRows.size() * fetchedColumns.size()); +- for (Clustering clustering : requestedRows) +- { +- for (ColumnDefinition column : fetchedColumns) +- size += ByteBufferUtil.serializedSizeWithShortLength(LegacyLayout.encodeCellName(metadata, clustering, column.name.bytes, null)); +- } +- } +- +- return size + TypeSizes.sizeof(true); // countCql3Rows +- } +- +- private SinglePartitionReadCommand deserializeNamesCommand(DataInputPlus in, boolean isDigest, CFMetaData metadata, DecoratedKey key, int nowInSeconds, int version) throws IOException +- { +- Pair selectionAndFilter = deserializeNamesSelectionAndFilter(in, metadata); +- +- // messages from old nodes will expect the thrift format, so always use 'true' for isForThrift +- return new SinglePartitionReadCommand( +- isDigest, version, true, metadata, nowInSeconds, selectionAndFilter.left, RowFilter.NONE, DataLimits.NONE, +- key, selectionAndFilter.right); +- } +- +- static Pair 
deserializeNamesSelectionAndFilter(DataInputPlus in, CFMetaData metadata) throws IOException +- { +- int numCellNames = in.readInt(); +- +- // The names filter could include either a) static columns or b) normal columns with the clustering columns +- // fully specified. We need to handle those cases differently in 3.0. +- NavigableSet clusterings = new TreeSet<>(metadata.comparator); +- +- ColumnFilter.Builder selectionBuilder = ColumnFilter.selectionBuilder(); +- for (int i = 0; i < numCellNames; i++) +- { +- ByteBuffer buffer = ByteBufferUtil.readWithShortLength(in); +- LegacyLayout.LegacyCellName cellName; +- try +- { +- cellName = LegacyLayout.decodeCellName(metadata, buffer); +- } +- catch (UnknownColumnException exc) +- { +- // TODO this probably needs a new exception class that shares a parent with UnknownColumnFamilyException +- throw new UnknownColumnFamilyException( +- "Received legacy range read command with names filter for unrecognized column name. " + +- "Fill name in filter (hex): " + ByteBufferUtil.bytesToHex(buffer), metadata.cfId); +- } +- +- // If we're querying for a static column, we may also need to read it +- // as if it were a thrift dynamic column (because the column metadata, +- // which makes it a static column in 3.0+, may have been added *after* +- // some values were written). Note that all cql queries on non-compact +- // tables used slice & not name filters prior to 3.0 so this path is +- // not taken for non-compact tables. It is theoretically possible to +- // get here via thrift, hence the check on metadata.isStaticCompactTable. +- // See CASSANDRA-11087. 
+- if (metadata.isStaticCompactTable() && cellName.clustering.equals(Clustering.STATIC_CLUSTERING)) +- { +- clusterings.add(Clustering.make(cellName.column.name.bytes)); +- selectionBuilder.add(metadata.compactValueColumn()); +- } +- else +- { +- clusterings.add(cellName.clustering); +- } +- +- selectionBuilder.add(cellName.column); +- } +- +- // for compact storage tables without clustering keys, the column holding the selected value is named +- // 'value' internally we add it to the selection here to prevent errors due to unexpected column names +- // when serializing the initial local data response +- if (metadata.isStaticCompactTable() && clusterings.isEmpty()) +- selectionBuilder.addAll(metadata.partitionColumns()); +- +- in.readBoolean(); // countCql3Rows +- +- // clusterings cannot include STATIC_CLUSTERING, so if the names filter is for static columns, clusterings +- // will be empty. However, by requesting the static columns in our ColumnFilter, this will still work. +- ClusteringIndexNamesFilter filter = new ClusteringIndexNamesFilter(clusterings, false); +- return Pair.create(selectionBuilder.build(), filter); +- } +- +- private long serializedNamesCommandSize(SinglePartitionReadCommand command) +- { +- ClusteringIndexNamesFilter filter = (ClusteringIndexNamesFilter)command.clusteringIndexFilter(); +- PartitionColumns columns = command.columnFilter().fetchedColumns(); +- return serializedNamesFilterSize(filter, command.metadata(), columns); +- } +- +- private void serializeSliceCommand(SinglePartitionReadCommand command, DataOutputPlus out) throws IOException +- { +- CFMetaData metadata = command.metadata(); +- ClusteringIndexSliceFilter filter = (ClusteringIndexSliceFilter)command.clusteringIndexFilter(); +- +- Slices slices = filter.requestedSlices(); +- boolean makeStaticSlice = !command.columnFilter().fetchedColumns().statics.isEmpty() && !slices.selects(Clustering.STATIC_CLUSTERING); +- serializeSlices(out, slices, filter.isReversed(), 
makeStaticSlice, metadata); +- +- out.writeBoolean(filter.isReversed()); +- +- boolean selectsStatics = !command.columnFilter().fetchedColumns().statics.isEmpty() || slices.selects(Clustering.STATIC_CLUSTERING); +- DataLimits limits = command.limits(); +- if (limits.isDistinct()) +- out.writeInt(1); // the limit is always 1 for DISTINCT queries +- else +- out.writeInt(updateLimitForQuery(command.limits().count(), filter.requestedSlices())); +- +- int compositesToGroup; +- if (limits.kind() == DataLimits.Kind.THRIFT_LIMIT || metadata.isDense()) +- compositesToGroup = -1; +- else if (limits.isDistinct() && !selectsStatics) +- compositesToGroup = -2; // for DISTINCT queries (CASSANDRA-8490) +- else +- compositesToGroup = metadata.clusteringColumns().size(); +- +- out.writeInt(compositesToGroup); +- } +- +- private SinglePartitionReadCommand deserializeSliceCommand(DataInputPlus in, boolean isDigest, CFMetaData metadata, DecoratedKey key, int nowInSeconds, int version) throws IOException +- { +- Pair p = deserializeSlicePartitionFilter(in, metadata); +- ClusteringIndexSliceFilter filter = p.left; +- boolean selectsStatics = p.right; +- int count = in.readInt(); +- int compositesToGroup = in.readInt(); +- +- // if a slice query from a pre-3.0 node doesn't cover statics, we shouldn't select them at all +- ColumnFilter columnFilter = LegacyRangeSliceCommandSerializer.getColumnSelectionForSlice(selectsStatics, compositesToGroup, metadata); +- +- // We have 2 types of DISTINCT queries: ones on only the partition key, and ones on the partition key and static columns. For the former, +- // we can easily detect the case because compositeToGroup is -2 and that's the only case it can be that. The latter is probablematic +- // however as we have no way to distinguish it from a normal select with a limit of 1 (and this, contrarily to the range query case +- // were the countCQL3Rows boolean allows us to decide). +- // So we consider this case not distinct here. 
This is ok because even if it is a distinct (with static), the count will be 1 and +- // we'll still just query one row (a distinct DataLimits currently behave exactly like a CQL limit with a count of 1). The only +- // drawback is that we'll send back the first row entirely while a 2.1/2.2 node would return only the first cell in that same +- // situation. This isn't a problem for 2.1/2.2 code however (it would be for a range query, as it would throw off the count for +- // reasons similar to CASSANDRA-10762, but it's ok for single partition queries). +- // We do _not_ want to do the reverse however and consider a 'SELECT * FROM foo LIMIT 1' as a DISTINCT query as that would make +- // us only return the 1st cell rather then 1st row. +- DataLimits limits; +- if (compositesToGroup == -2) +- limits = DataLimits.distinctLimits(count); // See CASSANDRA-8490 for the explanation of this value +- else if (compositesToGroup == -1) +- limits = DataLimits.thriftLimits(1, count); +- else +- limits = DataLimits.cqlLimits(count); +- +- // messages from old nodes will expect the thrift format, so always use 'true' for isForThrift +- return new SinglePartitionReadCommand(isDigest, version, true, metadata, nowInSeconds, columnFilter, RowFilter.NONE, limits, key, filter); +- } +- +- private long serializedSliceCommandSize(SinglePartitionReadCommand command) +- { +- CFMetaData metadata = command.metadata(); +- ClusteringIndexSliceFilter filter = (ClusteringIndexSliceFilter)command.clusteringIndexFilter(); +- +- Slices slices = filter.requestedSlices(); +- boolean makeStaticSlice = !command.columnFilter().fetchedColumns().statics.isEmpty() && !slices.selects(Clustering.STATIC_CLUSTERING); +- +- long size = serializedSlicesSize(slices, makeStaticSlice, metadata); +- size += TypeSizes.sizeof(command.clusteringIndexFilter().isReversed()); +- size += TypeSizes.sizeof(command.limits().count()); +- return size + TypeSizes.sizeof(0); // compositesToGroup +- } +- +- static void 
serializeSlices(DataOutputPlus out, Slices slices, boolean isReversed, boolean makeStaticSlice, CFMetaData metadata) throws IOException +- { +- out.writeInt(slices.size() + (makeStaticSlice ? 1 : 0)); +- +- // In 3.0 we always store the slices in normal comparator order. Pre-3.0 nodes expect the slices to +- // be in reversed order if the query is reversed, so we handle that here. +- if (isReversed) +- { +- for (int i = slices.size() - 1; i >= 0; i--) +- serializeSlice(out, slices.get(i), true, metadata); +- if (makeStaticSlice) +- serializeStaticSlice(out, true, metadata); +- } +- else +- { +- if (makeStaticSlice) +- serializeStaticSlice(out, false, metadata); +- for (Slice slice : slices) +- serializeSlice(out, slice, false, metadata); +- } +- } +- +- static long serializedSlicesSize(Slices slices, boolean makeStaticSlice, CFMetaData metadata) +- { +- long size = TypeSizes.sizeof(slices.size()); +- +- for (Slice slice : slices) +- { +- ByteBuffer sliceStart = LegacyLayout.encodeBound(metadata, slice.start(), true); +- size += ByteBufferUtil.serializedSizeWithShortLength(sliceStart); +- ByteBuffer sliceEnd = LegacyLayout.encodeBound(metadata, slice.end(), false); +- size += ByteBufferUtil.serializedSizeWithShortLength(sliceEnd); +- } +- +- if (makeStaticSlice) +- size += serializedStaticSliceSize(metadata); +- +- return size; +- } +- +- static long serializedStaticSliceSize(CFMetaData metadata) +- { +- // unlike serializeStaticSlice(), but we don't care about reversal for size calculations +- ByteBuffer sliceStart = LegacyLayout.encodeBound(metadata, ClusteringBound.BOTTOM, false); +- long size = ByteBufferUtil.serializedSizeWithShortLength(sliceStart); +- +- size += TypeSizes.sizeof((short) (metadata.comparator.size() * 3 + 2)); +- size += TypeSizes.sizeof((short) LegacyLayout.STATIC_PREFIX); +- for (int i = 0; i < metadata.comparator.size(); i++) +- { +- size += ByteBufferUtil.serializedSizeWithShortLength(ByteBufferUtil.EMPTY_BYTE_BUFFER); +- size += 1; // EOC 
+- } +- return size; +- } +- +- private static void serializeSlice(DataOutputPlus out, Slice slice, boolean isReversed, CFMetaData metadata) throws IOException +- { +- ByteBuffer sliceStart = LegacyLayout.encodeBound(metadata, isReversed ? slice.end() : slice.start(), !isReversed); +- ByteBufferUtil.writeWithShortLength(sliceStart, out); +- +- ByteBuffer sliceEnd = LegacyLayout.encodeBound(metadata, isReversed ? slice.start() : slice.end(), isReversed); +- ByteBufferUtil.writeWithShortLength(sliceEnd, out); +- } +- +- private static void serializeStaticSlice(DataOutputPlus out, boolean isReversed, CFMetaData metadata) throws IOException +- { +- // if reversed, write an empty bound for the slice start; if reversed, write out an empty bound for the +- // slice finish after we've written the static slice start +- if (!isReversed) +- { +- ByteBuffer sliceStart = LegacyLayout.encodeBound(metadata, ClusteringBound.BOTTOM, false); +- ByteBufferUtil.writeWithShortLength(sliceStart, out); +- } +- +- // write out the length of the composite +- out.writeShort(2 + metadata.comparator.size() * 3); // two bytes + EOC for each component, plus static prefix +- out.writeShort(LegacyLayout.STATIC_PREFIX); +- for (int i = 0; i < metadata.comparator.size(); i++) +- { +- ByteBufferUtil.writeWithShortLength(ByteBufferUtil.EMPTY_BYTE_BUFFER, out); +- // write the EOC, using an inclusive end if we're on the final component +- out.writeByte(i == metadata.comparator.size() - 1 ? 1 : 0); +- } +- +- if (isReversed) +- { +- ByteBuffer sliceStart = LegacyLayout.encodeBound(metadata, ClusteringBound.BOTTOM, false); +- ByteBufferUtil.writeWithShortLength(sliceStart, out); +- } +- } +- +- // Returns the deserialized filter, and whether static columns are queried (in pre-3.0, both info are determined by the slices, +- // but in 3.0 they are separated: whether static columns are queried or not depends on the ColumnFilter). 
+- static Pair deserializeSlicePartitionFilter(DataInputPlus in, CFMetaData metadata) throws IOException +- { +- int numSlices = in.readInt(); +- ByteBuffer[] startBuffers = new ByteBuffer[numSlices]; +- ByteBuffer[] finishBuffers = new ByteBuffer[numSlices]; +- for (int i = 0; i < numSlices; i++) +- { +- startBuffers[i] = ByteBufferUtil.readWithShortLength(in); +- finishBuffers[i] = ByteBufferUtil.readWithShortLength(in); +- } +- +- boolean reversed = in.readBoolean(); +- +- if (reversed) +- { +- // pre-3.0, reversed query slices put the greater element at the start of the slice +- ByteBuffer[] tmp = finishBuffers; +- finishBuffers = startBuffers; +- startBuffers = tmp; +- } +- +- boolean selectsStatics = false; +- Slices.Builder slicesBuilder = new Slices.Builder(metadata.comparator); +- for (int i = 0; i < numSlices; i++) +- { +- LegacyLayout.LegacyBound start = LegacyLayout.decodeBound(metadata, startBuffers[i], true); +- LegacyLayout.LegacyBound finish = LegacyLayout.decodeBound(metadata, finishBuffers[i], false); +- +- if (start.isStatic) +- { +- // If we start at the static block, this means we start at the beginning of the partition in 3.0 +- // terms (since 3.0 handles static outside of the slice). +- start = LegacyLayout.LegacyBound.BOTTOM; +- +- // Then if we include the static, records it +- if (start.bound.isInclusive()) +- selectsStatics = true; +- } +- else if (start == LegacyLayout.LegacyBound.BOTTOM) +- { +- selectsStatics = true; +- } +- +- // If the end of the slice is the end of the statics, then that mean this slice was just selecting static +- // columns. We have already recorded that in selectsStatics, so we can ignore the slice (which doesn't make +- // sense for 3.0). 
+- if (finish.isStatic) +- { +- assert finish.bound.isInclusive(); // it would make no sense for a pre-3.0 node to have a slice that stops +- // before the static columns (since there is nothing before that) +- continue; +- } +- +- slicesBuilder.add(Slice.make(start.bound, finish.bound)); +- } +- +- return Pair.create(new ClusteringIndexSliceFilter(slicesBuilder.build(), reversed), selectsStatics); +- } +- +- private static SinglePartitionReadCommand maybeConvertNamesToSlice(SinglePartitionReadCommand command) +- { +- if (command.clusteringIndexFilter().kind() != ClusteringIndexFilter.Kind.NAMES) +- return command; +- +- CFMetaData metadata = command.metadata(); +- +- if (!shouldConvertNamesToSlice(metadata, command.columnFilter().fetchedColumns())) +- return command; +- +- ClusteringIndexNamesFilter filter = (ClusteringIndexNamesFilter)command.clusteringIndexFilter(); +- ClusteringIndexSliceFilter sliceFilter = convertNamesFilterToSliceFilter(filter, metadata); +- return new SinglePartitionReadCommand( +- command.isDigestQuery(), command.digestVersion(), command.isForThrift(), metadata, command.nowInSec(), +- command.columnFilter(), command.rowFilter(), command.limits(), command.partitionKey(), sliceFilter); +- } +- +- /** +- * Returns true if a names filter on the given table and column selection should be converted to a slice +- * filter for compatibility with pre-3.0 nodes, false otherwise. +- */ +- static boolean shouldConvertNamesToSlice(CFMetaData metadata, PartitionColumns columns) +- { +- // On pre-3.0 nodes, due to CASSANDRA-5762, we always do a slice for CQL3 tables (not dense, composite). 
+- if (!metadata.isDense() && metadata.isCompound()) +- return true; +- +- // pre-3.0 nodes don't support names filters for reading collections, so if we're requesting any of those, +- // we need to convert this to a slice filter +- for (ColumnDefinition column : columns) +- { +- if (column.type.isMultiCell()) +- return true; +- } +- return false; +- } +- +- /** +- * Converts a names filter that is incompatible with pre-3.0 nodes to a slice filter that is compatible. +- */ +- private static ClusteringIndexSliceFilter convertNamesFilterToSliceFilter(ClusteringIndexNamesFilter filter, CFMetaData metadata) +- { +- SortedSet requestedRows = filter.requestedRows(); +- Slices slices; +- if (requestedRows.isEmpty()) +- { +- slices = Slices.NONE; +- } +- else if (requestedRows.size() == 1 && requestedRows.first().size() == 0) +- { +- slices = Slices.ALL; +- } +- else +- { +- Slices.Builder slicesBuilder = new Slices.Builder(metadata.comparator); +- for (Clustering clustering : requestedRows) +- slicesBuilder.add(ClusteringBound.inclusiveStartOf(clustering), ClusteringBound.inclusiveEndOf(clustering)); +- slices = slicesBuilder.build(); +- } +- +- return new ClusteringIndexSliceFilter(slices, filter.isReversed()); +- } +- +- /** +- * Potentially increases the existing query limit to account for the lack of exclusive bounds in pre-3.0 nodes. +- * @param limit the existing query limit +- * @param slices the requested slices +- * @return the updated limit +- */ +- static int updateLimitForQuery(int limit, Slices slices) +- { +- // Pre-3.0 nodes don't support exclusive bounds for slices. Instead, we query one more element if necessary +- // and filter it later (in LegacyRemoteDataResponse) +- if (!slices.hasLowerBound() && ! 
slices.hasUpperBound()) +- return limit; +- +- for (Slice slice : slices) +- { +- if (limit == Integer.MAX_VALUE) +- return limit; +- +- if (!slice.start().isInclusive()) +- limit++; +- if (!slice.end().isInclusive()) +- limit++; +- } +- return limit; +- } +- } + } +diff --git a/src/java/org/apache/cassandra/db/ReadResponse.java b/src/java/org/apache/cassandra/db/ReadResponse.java +index cca21f8..49112a0 100644 +--- a/src/java/org/apache/cassandra/db/ReadResponse.java ++++ b/src/java/org/apache/cassandra/db/ReadResponse.java +@@ -20,18 +20,13 @@ package org.apache.cassandra.db; + import java.io.*; + import java.nio.ByteBuffer; + import java.security.MessageDigest; +-import java.util.ArrayList; +-import java.util.Collections; + import java.util.List; + + import com.google.common.annotations.VisibleForTesting; + +-import org.apache.cassandra.config.CFMetaData; +-import org.apache.cassandra.db.filter.ClusteringIndexFilter; + import org.apache.cassandra.db.filter.ColumnFilter; + import org.apache.cassandra.db.rows.*; + import org.apache.cassandra.db.partitions.*; +-import org.apache.cassandra.dht.*; + import org.apache.cassandra.io.ForwardingVersionedSerializer; + import org.apache.cassandra.io.IVersionedSerializer; + import org.apache.cassandra.io.util.DataInputBuffer; +@@ -39,7 +34,6 @@ import org.apache.cassandra.io.util.DataInputPlus; + import org.apache.cassandra.io.util.DataOutputPlus; + import org.apache.cassandra.io.util.DataOutputBuffer; + import org.apache.cassandra.net.MessagingService; +-import org.apache.cassandra.thrift.ThriftResultsMerger; + import org.apache.cassandra.utils.ByteBufferUtil; + import org.apache.cassandra.utils.FBUtilities; + +@@ -47,28 +41,18 @@ public abstract class ReadResponse + { + // Serializer for single partition read response + public static final IVersionedSerializer serializer = new Serializer(); +- // Serializer for the pre-3.0 rang slice responses. 
+- public static final IVersionedSerializer legacyRangeSliceReplySerializer = new LegacyRangeSliceReplySerializer(); +- // Serializer for partition range read response (this actually delegate to 'serializer' in 3.0 and to +- // 'legacyRangeSliceReplySerializer' in older version. ++ // Serializer for partition range read response (this actually delegate to 'serializer' in 3.0) + public static final IVersionedSerializer rangeSliceSerializer = new ForwardingVersionedSerializer() + { + @Override + protected IVersionedSerializer delegate(int version) + { +- return version < MessagingService.VERSION_30 +- ? legacyRangeSliceReplySerializer +- : serializer; ++ return serializer; + } + }; + +- // This is used only when serializing data responses and we can't it easily in other cases. So this can be null, which is slighly +- // hacky, but as this hack doesn't escape this class, and it's easy enough to validate that it's not null when we need, it's "good enough". +- private final ReadCommand command; +- +- protected ReadResponse(ReadCommand command) ++ protected ReadResponse() + { +- this.command = command; + } + + public static ReadResponse createDataResponse(UnfilteredPartitionIterator data, ReadCommand command) +@@ -105,7 +89,7 @@ public abstract class ReadResponse + + private DigestResponse(ByteBuffer digest) + { +- super(null); ++ super(); + assert digest.hasRemaining(); + this.digest = digest; + } +@@ -135,7 +119,7 @@ public abstract class ReadResponse + { + private LocalDataResponse(UnfilteredPartitionIterator iter, ReadCommand command) + { +- super(command, build(iter, command.columnFilter()), SerializationHelper.Flag.LOCAL); ++ super(build(iter, command.columnFilter()), SerializationHelper.Flag.LOCAL); + } + + private static ByteBuffer build(UnfilteredPartitionIterator iter, ColumnFilter selection) +@@ -158,7 +142,7 @@ public abstract class ReadResponse + { + protected RemoteDataResponse(ByteBuffer data) + { +- super(null, data, SerializationHelper.Flag.FROM_REMOTE); 
++ super(data, SerializationHelper.Flag.FROM_REMOTE); + } + } + +@@ -169,9 +153,9 @@ public abstract class ReadResponse + private final ByteBuffer data; + private final SerializationHelper.Flag flag; + +- protected DataResponse(ReadCommand command, ByteBuffer data, SerializationHelper.Flag flag) ++ protected DataResponse(ByteBuffer data, SerializationHelper.Flag flag) + { +- super(command); ++ super(); + this.data = data; + this.flag = flag; + } +@@ -210,130 +194,12 @@ public abstract class ReadResponse + } + } + +- /** +- * A remote response from a pre-3.0 node. This needs a separate class in order to cleanly handle trimming and +- * reversal of results when the read command calls for it. Pre-3.0 nodes always return results in the normal +- * sorted order, even if the query asks for reversed results. Additionally, pre-3.0 nodes do not have a notion of +- * exclusive slices on non-composite tables, so extra rows may need to be trimmed. +- */ +- @VisibleForTesting +- static class LegacyRemoteDataResponse extends ReadResponse +- { +- private final List partitions; +- +- @VisibleForTesting +- LegacyRemoteDataResponse(List partitions) +- { +- super(null); // we never serialize LegacyRemoteDataResponses, so we don't care about the command +- this.partitions = partitions; +- } +- +- public UnfilteredPartitionIterator makeIterator(final ReadCommand command) +- { +- // Due to a bug in the serialization of AbstractBounds, anything that isn't a Range is understood by pre-3.0 nodes +- // as a Bound, which means IncludingExcludingBounds and ExcludingBounds responses may include keys they shouldn't. +- // So filter partitions that shouldn't be included here. 
+- boolean skipFirst = false; +- boolean skipLast = false; +- if (!partitions.isEmpty() && command instanceof PartitionRangeReadCommand) +- { +- AbstractBounds keyRange = ((PartitionRangeReadCommand)command).dataRange().keyRange(); +- boolean isExcludingBounds = keyRange instanceof ExcludingBounds; +- skipFirst = isExcludingBounds && !keyRange.contains(partitions.get(0).partitionKey()); +- skipLast = (isExcludingBounds || keyRange instanceof IncludingExcludingBounds) && !keyRange.contains(partitions.get(partitions.size() - 1).partitionKey()); +- } +- +- final List toReturn; +- if (skipFirst || skipLast) +- { +- toReturn = partitions.size() == 1 +- ? Collections.emptyList() +- : partitions.subList(skipFirst ? 1 : 0, skipLast ? partitions.size() - 1 : partitions.size()); +- } +- else +- { +- toReturn = partitions; +- } +- +- return new AbstractUnfilteredPartitionIterator() +- { +- private int idx; +- +- public boolean isForThrift() +- { +- return true; +- } +- +- public CFMetaData metadata() +- { +- return command.metadata(); +- } +- +- public boolean hasNext() +- { +- return idx < toReturn.size(); +- } +- +- public UnfilteredRowIterator next() +- { +- ImmutableBTreePartition partition = toReturn.get(idx++); +- +- ClusteringIndexFilter filter = command.clusteringIndexFilter(partition.partitionKey()); +- +- // Pre-3.0, we would always request one more row than we actually needed and the command-level "start" would +- // be the last-returned cell name, so the response would always include it. +- UnfilteredRowIterator iterator = partition.unfilteredIterator(command.columnFilter(), filter.getSlices(command.metadata()), filter.isReversed()); +- +- // Wrap results with a ThriftResultMerger only if they're intended for the thrift command. 
+- if (command.isForThrift()) +- return ThriftResultsMerger.maybeWrap(iterator, command.nowInSec()); +- else +- return iterator; +- } +- }; +- } +- +- public ByteBuffer digest(ReadCommand command) +- { +- try (UnfilteredPartitionIterator iterator = makeIterator(command)) +- { +- return makeDigest(iterator, command); +- } +- } +- +- public boolean isDigestResponse() +- { +- return false; +- } +- } +- + private static class Serializer implements IVersionedSerializer + { + public void serialize(ReadResponse response, DataOutputPlus out, int version) throws IOException + { + boolean isDigest = response instanceof DigestResponse; + ByteBuffer digest = isDigest ? ((DigestResponse)response).digest : ByteBufferUtil.EMPTY_BYTE_BUFFER; +- if (version < MessagingService.VERSION_30) +- { +- out.writeInt(digest.remaining()); +- out.write(digest); +- out.writeBoolean(isDigest); +- if (!isDigest) +- { +- assert response.command != null; // we only serialize LocalDataResponse, which always has the command set +- try (UnfilteredPartitionIterator iter = response.makeIterator(response.command)) +- { +- assert iter.hasNext(); +- try (UnfilteredRowIterator partition = iter.next()) +- { +- ByteBufferUtil.writeWithShortLength(partition.partitionKey().getKey(), out); +- LegacyLayout.serializeAsLegacyPartition(response.command, partition, out, version); +- } +- assert !iter.hasNext(); +- } +- } +- return; +- } + + ByteBufferUtil.writeWithVIntLength(digest, out); + if (!isDigest) +@@ -345,34 +211,6 @@ public abstract class ReadResponse + + public ReadResponse deserialize(DataInputPlus in, int version) throws IOException + { +- if (version < MessagingService.VERSION_30) +- { +- byte[] digest = null; +- int digestSize = in.readInt(); +- if (digestSize > 0) +- { +- digest = new byte[digestSize]; +- in.readFully(digest, 0, digestSize); +- } +- boolean isDigest = in.readBoolean(); +- assert isDigest == digestSize > 0; +- if (isDigest) +- { +- assert digest != null; +- return new 
DigestResponse(ByteBuffer.wrap(digest)); +- } +- +- // ReadResponses from older versions are always single-partition (ranges are handled by RangeSliceReply) +- ByteBuffer key = ByteBufferUtil.readWithShortLength(in); +- try (UnfilteredRowIterator rowIterator = LegacyLayout.deserializeLegacyPartition(in, version, SerializationHelper.Flag.FROM_REMOTE, key)) +- { +- if (rowIterator == null) +- return new LegacyRemoteDataResponse(Collections.emptyList()); +- +- return new LegacyRemoteDataResponse(Collections.singletonList(ImmutableBTreePartition.create(rowIterator))); +- } +- } +- + ByteBuffer digest = ByteBufferUtil.readWithVIntLength(in); + if (digest.hasRemaining()) + return new DigestResponse(digest); +@@ -387,28 +225,6 @@ public abstract class ReadResponse + boolean isDigest = response instanceof DigestResponse; + ByteBuffer digest = isDigest ? ((DigestResponse)response).digest : ByteBufferUtil.EMPTY_BYTE_BUFFER; + +- if (version < MessagingService.VERSION_30) +- { +- long size = TypeSizes.sizeof(digest.remaining()) +- + digest.remaining() +- + TypeSizes.sizeof(isDigest); +- if (!isDigest) +- { +- assert response.command != null; // we only serialize LocalDataResponse, which always has the command set +- try (UnfilteredPartitionIterator iter = response.makeIterator(response.command)) +- { +- assert iter.hasNext(); +- try (UnfilteredRowIterator partition = iter.next()) +- { +- size += ByteBufferUtil.serializedSizeWithShortLength(partition.partitionKey().getKey()); +- size += LegacyLayout.serializedSizeAsLegacyPartition(response.command, partition, version); +- } +- assert !iter.hasNext(); +- } +- } +- return size; +- } +- + long size = ByteBufferUtil.serializedSizeWithVIntLength(digest); + if (!isDigest) + { +@@ -421,81 +237,4 @@ public abstract class ReadResponse + return size; + } + } +- +- private static class LegacyRangeSliceReplySerializer implements IVersionedSerializer +- { +- public void serialize(ReadResponse response, DataOutputPlus out, int version) 
throws IOException +- { +- assert version < MessagingService.VERSION_30; +- +- // determine the number of partitions upfront for serialization +- int numPartitions = 0; +- assert response.command != null; // we only serialize LocalDataResponse, which always has the command set +- try (UnfilteredPartitionIterator iterator = response.makeIterator(response.command)) +- { +- while (iterator.hasNext()) +- { +- try (UnfilteredRowIterator atomIterator = iterator.next()) +- { +- numPartitions++; +- +- // we have to fully exhaust the subiterator +- while (atomIterator.hasNext()) +- atomIterator.next(); +- } +- } +- } +- +- out.writeInt(numPartitions); +- +- try (UnfilteredPartitionIterator iterator = response.makeIterator(response.command)) +- { +- while (iterator.hasNext()) +- { +- try (UnfilteredRowIterator partition = iterator.next()) +- { +- ByteBufferUtil.writeWithShortLength(partition.partitionKey().getKey(), out); +- LegacyLayout.serializeAsLegacyPartition(response.command, partition, out, version); +- } +- } +- } +- } +- +- public ReadResponse deserialize(DataInputPlus in, int version) throws IOException +- { +- assert version < MessagingService.VERSION_30; +- +- int partitionCount = in.readInt(); +- ArrayList partitions = new ArrayList<>(partitionCount); +- for (int i = 0; i < partitionCount; i++) +- { +- ByteBuffer key = ByteBufferUtil.readWithShortLength(in); +- try (UnfilteredRowIterator partition = LegacyLayout.deserializeLegacyPartition(in, version, SerializationHelper.Flag.FROM_REMOTE, key)) +- { +- partitions.add(ImmutableBTreePartition.create(partition)); +- } +- } +- return new LegacyRemoteDataResponse(partitions); +- } +- +- public long serializedSize(ReadResponse response, int version) +- { +- assert version < MessagingService.VERSION_30; +- long size = TypeSizes.sizeof(0); // number of partitions +- +- assert response.command != null; // we only serialize LocalDataResponse, which always has the command set +- try (UnfilteredPartitionIterator iterator = 
response.makeIterator(response.command)) +- { +- while (iterator.hasNext()) +- { +- try (UnfilteredRowIterator partition = iterator.next()) +- { +- size += ByteBufferUtil.serializedSizeWithShortLength(partition.partitionKey().getKey()); +- size += LegacyLayout.serializedSizeAsLegacyPartition(response.command, partition, version); +- } +- } +- } +- return size; +- } +- } + } +diff --git a/src/java/org/apache/cassandra/db/RowIndexEntry.java b/src/java/org/apache/cassandra/db/RowIndexEntry.java +index dd1fdb7..8983a1c 100644 +--- a/src/java/org/apache/cassandra/db/RowIndexEntry.java ++++ b/src/java/org/apache/cassandra/db/RowIndexEntry.java +@@ -23,7 +23,6 @@ import java.util.Arrays; + import java.util.List; + + import com.codahale.metrics.Histogram; +-import org.apache.cassandra.config.CFMetaData; + import org.apache.cassandra.cache.IMeasurableMemory; + import org.apache.cassandra.config.DatabaseDescriptor; + import org.apache.cassandra.io.ISerializer; +@@ -111,9 +110,6 @@ import static org.apache.cassandra.metrics.CassandraMetricsRegistry.Metrics; + *

  • {@link ShallowIndexedEntry} is for index entries with index samples + * that exceed {@link org.apache.cassandra.config.Config#column_index_cache_size_in_kb} + * for sstables with an offset table to the index samples.
  • +- *
  • {@link LegacyShallowIndexedEntry} is for index entries with index samples +- * that exceed {@link org.apache.cassandra.config.Config#column_index_cache_size_in_kb} +- * but for legacy sstables.
  • + * + *

    + * Since access to index samples on disk (obviously) requires some file +@@ -173,16 +169,6 @@ public class RowIndexEntry implements IMeasurableMemory + throw new UnsupportedOperationException(); + } + +- /** +- * The length of the row header (partition key, partition deletion and static row). +- * This value is only provided for indexed entries and this method will throw +- * {@code UnsupportedOperationException} if {@code !isIndexed()}. +- */ +- public long headerLength() +- { +- throw new UnsupportedOperationException(); +- } +- + public int columnsIndexCount() + { + return 0; +@@ -251,9 +237,9 @@ public class RowIndexEntry implements IMeasurableMemory + private final IndexInfo.Serializer idxInfoSerializer; + private final Version version; + +- public Serializer(CFMetaData metadata, Version version, SerializationHeader header) ++ public Serializer(Version version, SerializationHeader header) + { +- this.idxInfoSerializer = metadata.serializers().indexInfoSerializer(version, header); ++ this.idxInfoSerializer = IndexInfo.serializer(version, header); + this.version = version; + } + +@@ -264,22 +250,16 @@ public class RowIndexEntry implements IMeasurableMemory + + public void serialize(RowIndexEntry rie, DataOutputPlus out, ByteBuffer indexInfo) throws IOException + { +- assert version.storeRows() : "We read old index files but we should never write them"; +- + rie.serialize(out, idxInfoSerializer, indexInfo); + } + + public void serializeForCache(RowIndexEntry rie, DataOutputPlus out) throws IOException + { +- assert version.storeRows(); +- + rie.serializeForCache(out); + } + + public RowIndexEntry deserializeForCache(DataInputPlus in) throws IOException + { +- assert version.storeRows(); +- + long position = in.readUnsignedVInt(); + + switch (in.readByte()) +@@ -295,10 +275,8 @@ public class RowIndexEntry implements IMeasurableMemory + } + } + +- public static void skipForCache(DataInputPlus in, Version version) throws IOException ++ public static void 
skipForCache(DataInputPlus in) throws IOException + { +- assert version.storeRows(); +- + /* long position = */in.readUnsignedVInt(); + switch (in.readByte()) + { +@@ -317,9 +295,6 @@ public class RowIndexEntry implements IMeasurableMemory + + public RowIndexEntry deserialize(DataInputPlus in, long indexFilePosition) throws IOException + { +- if (!version.storeRows()) +- return LegacyShallowIndexedEntry.deserialize(in, indexFilePosition, idxInfoSerializer); +- + long position = in.readUnsignedVInt(); + + int size = (int)in.readUnsignedVInt(); +@@ -354,9 +329,6 @@ public class RowIndexEntry implements IMeasurableMemory + + public long deserializePositionAndSkip(DataInputPlus in) throws IOException + { +- if (!version.storeRows()) +- return LegacyShallowIndexedEntry.deserializePositionAndSkip(in); +- + return ShallowIndexedEntry.deserializePositionAndSkip(in); + } + +@@ -365,20 +337,20 @@ public class RowIndexEntry implements IMeasurableMemory + * of reading an entry, so this is only useful if you know what you are doing and in most case 'deserialize' + * should be used instead. + */ +- public static long readPosition(DataInputPlus in, Version version) throws IOException ++ public static long readPosition(DataInputPlus in) throws IOException + { +- return version.storeRows() ? in.readUnsignedVInt() : in.readLong(); ++ return in.readUnsignedVInt(); + } + + public static void skip(DataInputPlus in, Version version) throws IOException + { +- readPosition(in, version); +- skipPromotedIndex(in, version); ++ readPosition(in); ++ skipPromotedIndex(in); + } + +- private static void skipPromotedIndex(DataInputPlus in, Version version) throws IOException ++ private static void skipPromotedIndex(DataInputPlus in) throws IOException + { +- int size = version.storeRows() ? 
(int)in.readUnsignedVInt() : in.readInt(); ++ int size = (int)in.readUnsignedVInt(); + if (size <= 0) + return; + +@@ -413,164 +385,6 @@ public class RowIndexEntry implements IMeasurableMemory + out.writeByte(CACHE_NOT_INDEXED); + } + +- private static final class LegacyShallowIndexedEntry extends RowIndexEntry +- { +- private static final long BASE_SIZE; +- static +- { +- BASE_SIZE = ObjectSizes.measure(new LegacyShallowIndexedEntry(0, 0, DeletionTime.LIVE, 0, new int[0], null, 0)); +- } +- +- private final long indexFilePosition; +- private final int[] offsets; +- @Unmetered +- private final IndexInfo.Serializer idxInfoSerializer; +- private final DeletionTime deletionTime; +- private final long headerLength; +- private final int serializedSize; +- +- private LegacyShallowIndexedEntry(long dataFilePosition, long indexFilePosition, +- DeletionTime deletionTime, long headerLength, +- int[] offsets, IndexInfo.Serializer idxInfoSerializer, +- int serializedSize) +- { +- super(dataFilePosition); +- this.deletionTime = deletionTime; +- this.headerLength = headerLength; +- this.indexFilePosition = indexFilePosition; +- this.offsets = offsets; +- this.idxInfoSerializer = idxInfoSerializer; +- this.serializedSize = serializedSize; +- } +- +- @Override +- public DeletionTime deletionTime() +- { +- return deletionTime; +- } +- +- @Override +- public long headerLength() +- { +- return headerLength; +- } +- +- @Override +- public long unsharedHeapSize() +- { +- return BASE_SIZE + offsets.length * TypeSizes.sizeof(0); +- } +- +- @Override +- public int columnsIndexCount() +- { +- return offsets.length; +- } +- +- @Override +- public void serialize(DataOutputPlus out, IndexInfo.Serializer idxInfoSerializer, ByteBuffer indexInfo) +- { +- throw new UnsupportedOperationException("serializing legacy index entries is not supported"); +- } +- +- @Override +- public void serializeForCache(DataOutputPlus out) +- { +- throw new UnsupportedOperationException("serializing legacy index 
entries is not supported"); +- } +- +- @Override +- public IndexInfoRetriever openWithIndex(SegmentedFile indexFile) +- { +- int fieldsSize = (int) DeletionTime.serializer.serializedSize(deletionTime) +- + TypeSizes.sizeof(0); // columnIndexCount +- indexEntrySizeHistogram.update(serializedSize); +- indexInfoCountHistogram.update(offsets.length); +- return new LegacyIndexInfoRetriever(indexFilePosition + +- TypeSizes.sizeof(0L) + // position +- TypeSizes.sizeof(0) + // indexInfoSize +- fieldsSize, +- offsets, indexFile.createReader(), idxInfoSerializer); +- } +- +- public static RowIndexEntry deserialize(DataInputPlus in, long indexFilePosition, +- IndexInfo.Serializer idxInfoSerializer) throws IOException +- { +- long dataFilePosition = in.readLong(); +- +- int size = in.readInt(); +- if (size == 0) +- { +- return new RowIndexEntry<>(dataFilePosition); +- } +- else if (size <= DatabaseDescriptor.getColumnIndexCacheSize()) +- { +- return new IndexedEntry(dataFilePosition, in, idxInfoSerializer); +- } +- else +- { +- DeletionTime deletionTime = DeletionTime.serializer.deserialize(in); +- +- // For legacy sstables (i.e. sstables pre-"ma", pre-3.0) we have to scan all serialized IndexInfo +- // objects to calculate the offsets array. However, it might be possible to deserialize all +- // IndexInfo objects here - but to just skip feels more gentle to the heap/GC. 
+- +- int entries = in.readInt(); +- int[] offsets = new int[entries]; +- +- TrackedDataInputPlus tracked = new TrackedDataInputPlus(in); +- long start = tracked.getBytesRead(); +- long headerLength = 0L; +- for (int i = 0; i < entries; i++) +- { +- offsets[i] = (int) (tracked.getBytesRead() - start); +- if (i == 0) +- { +- IndexInfo info = idxInfoSerializer.deserialize(tracked); +- headerLength = info.offset; +- } +- else +- idxInfoSerializer.skip(tracked); +- } +- +- return new LegacyShallowIndexedEntry(dataFilePosition, indexFilePosition, deletionTime, headerLength, offsets, idxInfoSerializer, size); +- } +- } +- +- static long deserializePositionAndSkip(DataInputPlus in) throws IOException +- { +- long position = in.readLong(); +- +- int size = in.readInt(); +- if (size > 0) +- in.skipBytesFully(size); +- +- return position; +- } +- } +- +- private static final class LegacyIndexInfoRetriever extends FileIndexInfoRetriever +- { +- private final int[] offsets; +- +- private LegacyIndexInfoRetriever(long indexFilePosition, int[] offsets, FileDataInput reader, IndexInfo.Serializer idxInfoSerializer) +- { +- super(indexFilePosition, offsets.length, reader, idxInfoSerializer); +- this.offsets = offsets; +- } +- +- IndexInfo fetchIndex(int index) throws IOException +- { +- retrievals++; +- +- // seek to posision of IndexInfo +- indexReader.seek(indexInfoFilePosition + offsets[index]); +- +- // deserialize IndexInfo +- return idxInfoSerializer.deserialize(indexReader); +- } +- } +- + /** + * An entry in the row index for a row whose columns are indexed - used for both legacy and current formats. 
+ */ +@@ -622,14 +436,9 @@ public class RowIndexEntry implements IMeasurableMemory + for (int i = 0; i < columnsIndexCount; i++) + this.columnsIndex[i] = idxInfoSerializer.deserialize(in); + +- int[] offsets = null; +- if (version.storeRows()) +- { +- offsets = new int[this.columnsIndex.length]; +- for (int i = 0; i < offsets.length; i++) +- offsets[i] = in.readInt(); +- } +- this.offsets = offsets; ++ this.offsets = new int[this.columnsIndex.length]; ++ for (int i = 0; i < offsets.length; i++) ++ offsets[i] = in.readInt(); + + this.indexedPartSize = indexedPartSize; + +@@ -660,36 +469,6 @@ public class RowIndexEntry implements IMeasurableMemory + this.idxInfoSerializer = idxInfoSerializer; + } + +- /** +- * Constructor called from {@link LegacyShallowIndexedEntry#deserialize(org.apache.cassandra.io.util.DataInputPlus, long, org.apache.cassandra.io.sstable.IndexInfo.Serializer)}. +- * Only for legacy sstables. +- */ +- private IndexedEntry(long dataFilePosition, DataInputPlus in, IndexInfo.Serializer idxInfoSerializer) throws IOException +- { +- super(dataFilePosition); +- +- long headerLength = 0; +- this.deletionTime = DeletionTime.serializer.deserialize(in); +- int columnsIndexCount = in.readInt(); +- +- TrackedDataInputPlus trackedIn = new TrackedDataInputPlus(in); +- +- this.columnsIndex = new IndexInfo[columnsIndexCount]; +- for (int i = 0; i < columnsIndexCount; i++) +- { +- this.columnsIndex[i] = idxInfoSerializer.deserialize(trackedIn); +- if (i == 0) +- headerLength = this.columnsIndex[i].offset; +- } +- this.headerLength = headerLength; +- +- this.offsets = null; +- +- this.indexedPartSize = (int) trackedIn.getBytesRead(); +- +- this.idxInfoSerializer = idxInfoSerializer; +- } +- + @Override + public boolean indexOnHeap() + { +@@ -709,12 +488,6 @@ public class RowIndexEntry implements IMeasurableMemory + } + + @Override +- public long headerLength() +- { +- return headerLength; +- } +- +- @Override + public IndexInfoRetriever openWithIndex(SegmentedFile 
indexFile) + { + indexEntrySizeHistogram.update(serializedSize(deletionTime, headerLength, columnsIndex.length) + indexedPartSize); +@@ -873,12 +646,6 @@ public class RowIndexEntry implements IMeasurableMemory + } + + @Override +- public long headerLength() +- { +- return headerLength; +- } +- +- @Override + public IndexInfoRetriever openWithIndex(SegmentedFile indexFile) + { + indexEntrySizeHistogram.update(indexedPartSize + fieldsSerializedSize); +diff --git a/src/java/org/apache/cassandra/db/Serializers.java b/src/java/org/apache/cassandra/db/Serializers.java +deleted file mode 100644 +index d6aac64..0000000 +--- a/src/java/org/apache/cassandra/db/Serializers.java ++++ /dev/null +@@ -1,183 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. 
+- */ +-package org.apache.cassandra.db; +- +-import java.io.*; +-import java.nio.ByteBuffer; +-import java.util.List; +-import java.util.Map; +-import java.util.concurrent.ConcurrentHashMap; +- +-import org.apache.cassandra.config.CFMetaData; +-import org.apache.cassandra.db.marshal.AbstractType; +-import org.apache.cassandra.db.marshal.CompositeType; +-import org.apache.cassandra.io.ISerializer; +-import org.apache.cassandra.io.sstable.IndexInfo; +-import org.apache.cassandra.io.sstable.format.big.BigFormat; +-import org.apache.cassandra.io.util.DataInputPlus; +-import org.apache.cassandra.io.util.DataOutputPlus; +-import org.apache.cassandra.io.sstable.format.Version; +-import org.apache.cassandra.utils.ByteBufferUtil; +- +-/** +- * Holds references on serializers that depend on the table definition. +- */ +-public class Serializers +-{ +- private final CFMetaData metadata; +- +- private Map otherVersionClusteringSerializers; +- +- private final IndexInfo.Serializer latestVersionIndexSerializer; +- +- public Serializers(CFMetaData metadata) +- { +- this.metadata = metadata; +- this.latestVersionIndexSerializer = new IndexInfo.Serializer(BigFormat.latestVersion, +- indexEntryClusteringPrefixSerializer(BigFormat.latestVersion, SerializationHeader.makeWithoutStats(metadata))); +- } +- +- IndexInfo.Serializer indexInfoSerializer(Version version, SerializationHeader header) +- { +- // null header indicates streaming from pre-3.0 sstables +- if (version.equals(BigFormat.latestVersion) && header != null) +- return latestVersionIndexSerializer; +- +- if (otherVersionClusteringSerializers == null) +- otherVersionClusteringSerializers = new ConcurrentHashMap<>(); +- IndexInfo.Serializer serializer = otherVersionClusteringSerializers.get(version); +- if (serializer == null) +- { +- serializer = new IndexInfo.Serializer(version, +- indexEntryClusteringPrefixSerializer(version, header)); +- otherVersionClusteringSerializers.put(version, serializer); +- } +- return 
serializer; +- } +- +- // TODO: Once we drop support for old (pre-3.0) sstables, we can drop this method and inline the calls to +- // ClusteringPrefix.serializer directly. At which point this whole class probably becomes +- // unecessary (since IndexInfo.Serializer won't depend on the metadata either). +- private ISerializer indexEntryClusteringPrefixSerializer(Version version, SerializationHeader header) +- { +- if (!version.storeRows() || header == null) //null header indicates streaming from pre-3.0 sstables +- { +- return oldFormatSerializer(version); +- } +- +- return new NewFormatSerializer(version, header.clusteringTypes()); +- } +- +- private ISerializer oldFormatSerializer(Version version) +- { +- return new ISerializer() +- { +- List> clusteringTypes = SerializationHeader.makeWithoutStats(metadata).clusteringTypes(); +- +- public void serialize(ClusteringPrefix clustering, DataOutputPlus out) throws IOException +- { +- //we deserialize in the old format and serialize in the new format +- ClusteringPrefix.serializer.serialize(clustering, out, +- version.correspondingMessagingVersion(), +- clusteringTypes); +- } +- +- @Override +- public void skip(DataInputPlus in) throws IOException +- { +- ByteBufferUtil.skipShortLength(in); +- } +- +- public ClusteringPrefix deserialize(DataInputPlus in) throws IOException +- { +- // We're reading the old cellname/composite +- ByteBuffer bb = ByteBufferUtil.readWithShortLength(in); +- assert bb.hasRemaining(); // empty cellnames were invalid +- +- int clusteringSize = metadata.clusteringColumns().size(); +- // If the table has no clustering column, then the cellname will just be the "column" name, which we ignore here. +- if (clusteringSize == 0) +- return Clustering.EMPTY; +- +- if (!metadata.isCompound()) +- return Clustering.make(bb); +- +- List components = CompositeType.splitName(bb); +- byte eoc = CompositeType.lastEOC(bb); +- +- if (eoc == 0 || components.size() >= clusteringSize) +- { +- // That's a clustering. 
+- if (components.size() > clusteringSize) +- components = components.subList(0, clusteringSize); +- +- return Clustering.make(components.toArray(new ByteBuffer[clusteringSize])); +- } +- else +- { +- // It's a range tombstone bound. It is a start since that's the only part we've ever included +- // in the index entries. +- ClusteringPrefix.Kind boundKind = eoc > 0 +- ? ClusteringPrefix.Kind.EXCL_START_BOUND +- : ClusteringPrefix.Kind.INCL_START_BOUND; +- +- return ClusteringBound.create(boundKind, components.toArray(new ByteBuffer[components.size()])); +- } +- } +- +- public long serializedSize(ClusteringPrefix clustering) +- { +- return ClusteringPrefix.serializer.serializedSize(clustering, version.correspondingMessagingVersion(), +- clusteringTypes); +- } +- }; +- } +- +- private static class NewFormatSerializer implements ISerializer +- { +- private final Version version; +- private final List> clusteringTypes; +- +- NewFormatSerializer(Version version, List> clusteringTypes) +- { +- this.version = version; +- this.clusteringTypes = clusteringTypes; +- } +- +- public void serialize(ClusteringPrefix clustering, DataOutputPlus out) throws IOException +- { +- ClusteringPrefix.serializer.serialize(clustering, out, version.correspondingMessagingVersion(), clusteringTypes); +- } +- +- @Override +- public void skip(DataInputPlus in) throws IOException +- { +- ClusteringPrefix.serializer.skip(in, version.correspondingMessagingVersion(), clusteringTypes); +- } +- +- public ClusteringPrefix deserialize(DataInputPlus in) throws IOException +- { +- return ClusteringPrefix.serializer.deserialize(in, version.correspondingMessagingVersion(), clusteringTypes); +- } +- +- public long serializedSize(ClusteringPrefix clustering) +- { +- return ClusteringPrefix.serializer.serializedSize(clustering, version.correspondingMessagingVersion(), clusteringTypes); +- } +- } +-} +diff --git a/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java 
b/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java +index 855b030..f5ec2ac 100644 +--- a/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java ++++ b/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java +@@ -49,7 +49,6 @@ import org.apache.cassandra.service.CacheService; + import org.apache.cassandra.service.ClientState; + import org.apache.cassandra.service.StorageProxy; + import org.apache.cassandra.service.pager.*; +-import org.apache.cassandra.thrift.ThriftResultsMerger; + import org.apache.cassandra.tracing.Tracing; + import org.apache.cassandra.utils.FBUtilities; + import org.apache.cassandra.utils.SearchIterator; +@@ -71,7 +70,6 @@ public class SinglePartitionReadCommand extends ReadCommand + + public SinglePartitionReadCommand(boolean isDigest, + int digestVersion, +- boolean isForThrift, + CFMetaData metadata, + int nowInSec, + ColumnFilter columnFilter, +@@ -80,7 +78,7 @@ public class SinglePartitionReadCommand extends ReadCommand + DecoratedKey partitionKey, + ClusteringIndexFilter clusteringIndexFilter) + { +- super(Kind.SINGLE_PARTITION, isDigest, digestVersion, isForThrift, metadata, nowInSec, columnFilter, rowFilter, limits); ++ super(Kind.SINGLE_PARTITION, isDigest, digestVersion, metadata, nowInSec, columnFilter, rowFilter, limits); + assert partitionKey.getPartitioner() == metadata.partitioner; + this.partitionKey = partitionKey; + this.clusteringIndexFilter = clusteringIndexFilter; +@@ -107,33 +105,7 @@ public class SinglePartitionReadCommand extends ReadCommand + DecoratedKey partitionKey, + ClusteringIndexFilter clusteringIndexFilter) + { +- return create(false, metadata, nowInSec, columnFilter, rowFilter, limits, partitionKey, clusteringIndexFilter); +- } +- +- /** +- * Creates a new read command on a single partition for thrift. +- * +- * @param isForThrift whether the query is for thrift or not. +- * @param metadata the table to query. 
+- * @param nowInSec the time in seconds to use are "now" for this query. +- * @param columnFilter the column filter to use for the query. +- * @param rowFilter the row filter to use for the query. +- * @param limits the limits to use for the query. +- * @param partitionKey the partition key for the partition to query. +- * @param clusteringIndexFilter the clustering index filter to use for the query. +- * +- * @return a newly created read command. +- */ +- public static SinglePartitionReadCommand create(boolean isForThrift, +- CFMetaData metadata, +- int nowInSec, +- ColumnFilter columnFilter, +- RowFilter rowFilter, +- DataLimits limits, +- DecoratedKey partitionKey, +- ClusteringIndexFilter clusteringIndexFilter) +- { +- return new SinglePartitionReadCommand(false, 0, isForThrift, metadata, nowInSec, columnFilter, rowFilter, limits, partitionKey, clusteringIndexFilter); ++ return new SinglePartitionReadCommand(false, 0, metadata, nowInSec, columnFilter, rowFilter, limits, partitionKey, clusteringIndexFilter); + } + + /** +@@ -264,7 +236,7 @@ public class SinglePartitionReadCommand extends ReadCommand + + public SinglePartitionReadCommand copy() + { +- return new SinglePartitionReadCommand(isDigestQuery(), digestVersion(), isForThrift(), metadata(), nowInSec(), columnFilter(), rowFilter(), limits(), partitionKey(), clusteringIndexFilter()); ++ return new SinglePartitionReadCommand(isDigestQuery(), digestVersion(), metadata(), nowInSec(), columnFilter(), rowFilter(), limits(), partitionKey(), clusteringIndexFilter()); + } + + public DecoratedKey partitionKey() +@@ -320,8 +292,7 @@ public class SinglePartitionReadCommand extends ReadCommand + { + // We shouldn't have set digest yet when reaching that point + assert !isDigestQuery(); +- return create(isForThrift(), +- metadata(), ++ return create(metadata(), + nowInSec(), + columnFilter(), + rowFilter(), +@@ -356,7 +327,7 @@ public class SinglePartitionReadCommand extends ReadCommand + UnfilteredRowIterator 
partition = cfs.isRowCacheEnabled() + ? getThroughCache(cfs, executionController) + : queryMemtableAndDisk(cfs, executionController); +- return new SingletonUnfilteredPartitionIterator(partition, isForThrift()); ++ return new SingletonUnfilteredPartitionIterator(partition); + } + + /** +@@ -533,7 +504,7 @@ public class SinglePartitionReadCommand extends ReadCommand + @SuppressWarnings("resource") // 'iter' is added to iterators which is closed on exception, or through the closing of the final merged iterator + UnfilteredRowIterator iter = filter.getUnfilteredRowIterator(columnFilter(), partition); + oldestUnrepairedTombstone = Math.min(oldestUnrepairedTombstone, partition.stats().minLocalDeletionTime); +- iterators.add(isForThrift() ? ThriftResultsMerger.maybeWrap(iter, nowInSec()) : iter); ++ iterators.add(iter); + } + + /* +@@ -576,7 +547,7 @@ public class SinglePartitionReadCommand extends ReadCommand + + @SuppressWarnings("resource") // 'iter' is added to iterators which is closed on exception, + // or through the closing of the final merged iterator +- UnfilteredRowIteratorWithLowerBound iter = makeIterator(cfs, sstable, true); ++ UnfilteredRowIteratorWithLowerBound iter = makeIterator(cfs, sstable); + if (!sstable.isRepaired()) + oldestUnrepairedTombstone = Math.min(oldestUnrepairedTombstone, sstable.getMinLocalDeletionTime()); + +@@ -596,7 +567,7 @@ public class SinglePartitionReadCommand extends ReadCommand + + @SuppressWarnings("resource") // 'iter' is added to iterators which is close on exception, + // or through the closing of the final merged iterator +- UnfilteredRowIteratorWithLowerBound iter = makeIterator(cfs, sstable, false); ++ UnfilteredRowIteratorWithLowerBound iter = makeIterator(cfs, sstable); + if (!sstable.isRepaired()) + oldestUnrepairedTombstone = Math.min(oldestUnrepairedTombstone, sstable.getMinLocalDeletionTime()); + +@@ -639,16 +610,13 @@ public class SinglePartitionReadCommand extends ReadCommand + return 
clusteringIndexFilter().shouldInclude(sstable); + } + +- private UnfilteredRowIteratorWithLowerBound makeIterator(ColumnFamilyStore cfs, final SSTableReader sstable, boolean applyThriftTransformation) ++ private UnfilteredRowIteratorWithLowerBound makeIterator(ColumnFamilyStore cfs, final SSTableReader sstable) + { + return StorageHook.instance.makeRowIteratorWithLowerBound(cfs, + partitionKey(), + sstable, + clusteringIndexFilter(), +- columnFilter(), +- isForThrift(), +- nowInSec(), +- applyThriftTransformation); ++ columnFilter()); + + } + +@@ -723,7 +691,7 @@ public class SinglePartitionReadCommand extends ReadCommand + if (iter.isEmpty()) + continue; + +- result = add(isForThrift() ? ThriftResultsMerger.maybeWrap(iter, nowInSec()) : iter, result, filter, false); ++ result = add(iter, result, filter, false); + } + } + +@@ -756,7 +724,7 @@ public class SinglePartitionReadCommand extends ReadCommand + + // We need to get the partition deletion and include it if it's live. In any case though, we're done with that sstable. 
+ sstable.incrementReadCount(); +- try (UnfilteredRowIterator iter = StorageHook.instance.makeRowIterator(cfs, sstable, partitionKey(), Slices.ALL, columnFilter(), filter.isReversed(), isForThrift())) ++ try (UnfilteredRowIterator iter = StorageHook.instance.makeRowIterator(cfs, sstable, partitionKey(), Slices.ALL, columnFilter(), filter.isReversed())) + { + if (iter.partitionLevelDeletion().isLive()) + { +@@ -769,7 +737,7 @@ public class SinglePartitionReadCommand extends ReadCommand + + Tracing.trace("Merging data from sstable {}", sstable.descriptor.generation); + sstable.incrementReadCount(); +- try (UnfilteredRowIterator iter = StorageHook.instance.makeRowIterator(cfs, sstable, partitionKey(), filter.getSlices(metadata()), columnFilter(), filter.isReversed(), isForThrift())) ++ try (UnfilteredRowIterator iter = StorageHook.instance.makeRowIterator(cfs, sstable, partitionKey(), filter.getSlices(metadata()), columnFilter(), filter.isReversed())) + { + if (iter.isEmpty()) + continue; +@@ -777,7 +745,7 @@ public class SinglePartitionReadCommand extends ReadCommand + if (sstable.isRepaired()) + onlyUnrepaired = false; + sstablesIterated++; +- result = add(isForThrift() ? 
ThriftResultsMerger.maybeWrap(iter, nowInSec()) : iter, result, filter, sstable.isRepaired()); ++ result = add(iter, result, filter, sstable.isRepaired()); + } + } + +@@ -1034,12 +1002,12 @@ public class SinglePartitionReadCommand extends ReadCommand + + private static class Deserializer extends SelectionDeserializer + { +- public ReadCommand deserialize(DataInputPlus in, int version, boolean isDigest, int digestVersion, boolean isForThrift, CFMetaData metadata, int nowInSec, ColumnFilter columnFilter, RowFilter rowFilter, DataLimits limits, Optional index) ++ public ReadCommand deserialize(DataInputPlus in, int version, boolean isDigest, int digestVersion, CFMetaData metadata, int nowInSec, ColumnFilter columnFilter, RowFilter rowFilter, DataLimits limits, Optional index) + throws IOException + { + DecoratedKey key = metadata.decorateKey(metadata.getKeyValidator().readValue(in, DatabaseDescriptor.getMaxValueSize())); + ClusteringIndexFilter filter = ClusteringIndexFilter.serializer.deserialize(in, version, metadata); +- return new SinglePartitionReadCommand(isDigest, digestVersion, isForThrift, metadata, nowInSec, columnFilter, rowFilter, limits, key, filter); ++ return new SinglePartitionReadCommand(isDigest, digestVersion, metadata, nowInSec, columnFilter, rowFilter, limits, key, filter); + } + } + } +diff --git a/src/java/org/apache/cassandra/db/SizeEstimatesRecorder.java b/src/java/org/apache/cassandra/db/SizeEstimatesRecorder.java +index 0b31b87..9c199b6 100644 +--- a/src/java/org/apache/cassandra/db/SizeEstimatesRecorder.java ++++ b/src/java/org/apache/cassandra/db/SizeEstimatesRecorder.java +@@ -40,8 +40,7 @@ import org.apache.cassandra.utils.concurrent.Refs; + /** + * A very simplistic/crude partition count/size estimator. + * +- * Exposing per-primary-range estimated partitions count and size in CQL form, +- * as a direct CQL alternative to Thrift's describe_splits_ex(). ++ * Exposing per-primary-range estimated partitions count and size in CQL form. 
+ * + * Estimates (per primary range) are calculated and dumped into a system table (system.size_estimates) every 5 minutes. + * +diff --git a/src/java/org/apache/cassandra/db/StorageHook.java b/src/java/org/apache/cassandra/db/StorageHook.java +index 0f27adb..c1e7f66 100644 +--- a/src/java/org/apache/cassandra/db/StorageHook.java ++++ b/src/java/org/apache/cassandra/db/StorageHook.java +@@ -38,17 +38,13 @@ public interface StorageHook + DecoratedKey partitionKey, + SSTableReader sstable, + ClusteringIndexFilter filter, +- ColumnFilter selectedColumns, +- boolean isForThrift, +- int nowInSec, +- boolean applyThriftTransformation); ++ ColumnFilter selectedColumns); + public UnfilteredRowIterator makeRowIterator(ColumnFamilyStore cfs, + SSTableReader sstable, + DecoratedKey key, + Slices slices, + ColumnFilter selectedColumns, +- boolean reversed, +- boolean isForThrift); ++ boolean reversed); + + static StorageHook createHook() + { +@@ -65,20 +61,17 @@ public interface StorageHook + + public void reportRead(UUID cfid, DecoratedKey key) {} + +- public UnfilteredRowIteratorWithLowerBound makeRowIteratorWithLowerBound(ColumnFamilyStore cfs, DecoratedKey partitionKey, SSTableReader sstable, ClusteringIndexFilter filter, ColumnFilter selectedColumns, boolean isForThrift, int nowInSec, boolean applyThriftTransformation) ++ public UnfilteredRowIteratorWithLowerBound makeRowIteratorWithLowerBound(ColumnFamilyStore cfs, DecoratedKey partitionKey, SSTableReader sstable, ClusteringIndexFilter filter, ColumnFilter selectedColumns) + { + return new UnfilteredRowIteratorWithLowerBound(partitionKey, + sstable, + filter, +- selectedColumns, +- isForThrift, +- nowInSec, +- applyThriftTransformation); ++ selectedColumns); + } + +- public UnfilteredRowIterator makeRowIterator(ColumnFamilyStore cfs, SSTableReader sstable, DecoratedKey key, Slices slices, ColumnFilter selectedColumns, boolean reversed, boolean isForThrift) ++ public UnfilteredRowIterator 
makeRowIterator(ColumnFamilyStore cfs, SSTableReader sstable, DecoratedKey key, Slices slices, ColumnFilter selectedColumns, boolean reversed) + { +- return sstable.iterator(key, slices, selectedColumns, reversed, isForThrift); ++ return sstable.iterator(key, slices, selectedColumns, reversed); + } + }; + } +diff --git a/src/java/org/apache/cassandra/db/SystemKeyspace.java b/src/java/org/apache/cassandra/db/SystemKeyspace.java +index 36629a1..b6aa01e 100644 +--- a/src/java/org/apache/cassandra/db/SystemKeyspace.java ++++ b/src/java/org/apache/cassandra/db/SystemKeyspace.java +@@ -37,6 +37,7 @@ import org.slf4j.Logger; + import org.slf4j.LoggerFactory; + + import org.apache.cassandra.config.CFMetaData; ++import org.apache.cassandra.config.ColumnDefinition; + import org.apache.cassandra.config.DatabaseDescriptor; + import org.apache.cassandra.cql3.QueryProcessor; + import org.apache.cassandra.cql3.UntypedResultSet; +@@ -57,7 +58,6 @@ import org.apache.cassandra.schema.*; + import org.apache.cassandra.service.StorageService; + import org.apache.cassandra.service.paxos.Commit; + import org.apache.cassandra.service.paxos.PaxosState; +-import org.apache.cassandra.thrift.cassandraConstants; + import org.apache.cassandra.transport.Server; + import org.apache.cassandra.utils.*; + +@@ -165,10 +165,10 @@ public final class SystemKeyspace + + "release_version text," + + "rpc_address inet," + + "schema_version uuid," +- + "thrift_version text," + + "tokens set," + + "truncated_at map," +- + "PRIMARY KEY ((key)))"); ++ + "PRIMARY KEY ((key)))" ++ ).recordDeprecatedSystemColumn("thrift_version", UTF8Type.instance); + + private static final CFMetaData Peers = + compile(PEERS, +@@ -478,7 +478,6 @@ public final class SystemKeyspace + "cluster_name," + + "release_version," + + "cql_version," + +- "thrift_version," + + "native_protocol_version," + + "data_center," + + "rack," + +@@ -486,14 +485,13 @@ public final class SystemKeyspace + "rpc_address," + + "broadcast_address," + + 
"listen_address" + +- ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; ++ ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; + IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch(); + executeOnceInternal(String.format(req, LOCAL), + LOCAL, + DatabaseDescriptor.getClusterName(), + FBUtilities.getReleaseVersionString(), + QueryProcessor.CQL_VERSION.toString(), +- cassandraConstants.VERSION, + String.valueOf(Server.CURRENT_VERSION), + snitch.getDatacenter(FBUtilities.getBroadcastAddress()), + snitch.getRack(FBUtilities.getBroadcastAddress()), +diff --git a/src/java/org/apache/cassandra/db/UnfilteredDeserializer.java b/src/java/org/apache/cassandra/db/UnfilteredDeserializer.java +index 1ab96fa..2755a57 100644 +--- a/src/java/org/apache/cassandra/db/UnfilteredDeserializer.java ++++ b/src/java/org/apache/cassandra/db/UnfilteredDeserializer.java +@@ -59,14 +59,9 @@ public abstract class UnfilteredDeserializer + public static UnfilteredDeserializer create(CFMetaData metadata, + DataInputPlus in, + SerializationHeader header, +- SerializationHelper helper, +- DeletionTime partitionDeletion, +- boolean readAllAsDynamic) ++ SerializationHelper helper) + { +- if (helper.version >= MessagingService.VERSION_30) +- return new CurrentDeserializer(metadata, in, header, helper); +- else +- return new OldFormatDeserializer(metadata, in, helper, partitionDeletion, readAllAsDynamic); ++ return new CurrentDeserializer(metadata, in, header, helper); + } + + /** +@@ -239,432 +234,4 @@ public abstract class UnfilteredDeserializer + return 0; + } + } +- +- public static class OldFormatDeserializer extends UnfilteredDeserializer +- { +- private final boolean readAllAsDynamic; +- private boolean skipStatic; +- +- // The next Unfiltered to return, computed by hasNext() +- private Unfiltered next; +- // A temporary storage for an unfiltered that isn't returned next but should be looked at just afterwards +- private Unfiltered saved; +- +- private boolean isFirst = true; +- +- // The 
Unfiltered as read from the old format input +- private final UnfilteredIterator iterator; +- +- // The position in the input after the last data consumption (readNext/skipNext). +- private long lastConsumedPosition; +- +- private OldFormatDeserializer(CFMetaData metadata, +- DataInputPlus in, +- SerializationHelper helper, +- DeletionTime partitionDeletion, +- boolean readAllAsDynamic) +- { +- super(metadata, in, helper); +- this.iterator = new UnfilteredIterator(partitionDeletion); +- this.readAllAsDynamic = readAllAsDynamic; +- this.lastConsumedPosition = currentPosition(); +- } +- +- public void setSkipStatic() +- { +- this.skipStatic = true; +- } +- +- private boolean isStatic(Unfiltered unfiltered) +- { +- return unfiltered.isRow() && ((Row)unfiltered).isStatic(); +- } +- +- public boolean hasNext() throws IOException +- { +- try +- { +- while (next == null) +- { +- if (saved == null && !iterator.hasNext()) +- return false; +- +- next = saved == null ? iterator.next() : saved; +- saved = null; +- +- // The sstable iterators assume that if there is one, the static row is the first thing this deserializer will return. +- // However, in the old format, a range tombstone with an empty start would sort before any static cell. So we should +- // detect that case and return the static parts first if necessary. +- if (isFirst && iterator.hasNext() && isStatic(iterator.peek())) +- { +- saved = next; +- next = iterator.next(); +- } +- isFirst = false; +- +- // When reading old tables, we sometimes want to skip static data (due to how staticly defined column of compact +- // tables are handled). 
+- if (skipStatic && isStatic(next)) +- next = null; +- } +- return true; +- } +- catch (IOError e) +- { +- if (e.getCause() != null && e.getCause() instanceof IOException) +- throw (IOException)e.getCause(); +- throw e; +- } +- } +- +- private boolean isRow(LegacyLayout.LegacyAtom atom) +- { +- if (atom.isCell()) +- return true; +- +- LegacyLayout.LegacyRangeTombstone tombstone = atom.asRangeTombstone(); +- return tombstone.isCollectionTombstone() || tombstone.isRowDeletion(metadata); +- } +- +- public int compareNextTo(ClusteringBound bound) throws IOException +- { +- if (!hasNext()) +- throw new IllegalStateException(); +- return metadata.comparator.compare(next.clustering(), bound); +- } +- +- public boolean nextIsRow() throws IOException +- { +- if (!hasNext()) +- throw new IllegalStateException(); +- return next.isRow(); +- } +- +- public boolean nextIsStatic() throws IOException +- { +- return nextIsRow() && ((Row)next).isStatic(); +- } +- +- private long currentPosition() +- { +- // We return a bogus value if the input is not file based, but check we never rely +- // on that value in that case in bytesReadForUnconsumedData +- return in instanceof FileDataInput ? 
((FileDataInput)in).getFilePointer() : 0; +- } +- +- public Unfiltered readNext() throws IOException +- { +- if (!hasNext()) +- throw new IllegalStateException(); +- Unfiltered toReturn = next; +- next = null; +- lastConsumedPosition = currentPosition(); +- return toReturn; +- } +- +- public void skipNext() throws IOException +- { +- if (!hasNext()) +- throw new UnsupportedOperationException(); +- next = null; +- lastConsumedPosition = currentPosition(); +- } +- +- public long bytesReadForUnconsumedData() +- { +- if (!(in instanceof FileDataInput)) +- throw new AssertionError(); +- +- return currentPosition() - lastConsumedPosition; +- } +- +- public void clearState() +- { +- next = null; +- saved = null; +- iterator.clearState(); +- lastConsumedPosition = currentPosition(); +- } +- +- // Groups atoms from the input into proper Unfiltered. +- // Note: this could use guava AbstractIterator except that we want to be able to clear +- // the internal state of the iterator so it's cleaner to do it ourselves. +- private class UnfilteredIterator implements PeekingIterator +- { +- private final AtomIterator atoms; +- private final LegacyLayout.CellGrouper grouper; +- private final TombstoneTracker tombstoneTracker; +- +- private Unfiltered next; +- +- private UnfilteredIterator(DeletionTime partitionDeletion) +- { +- this.grouper = new LegacyLayout.CellGrouper(metadata, helper); +- this.tombstoneTracker = new TombstoneTracker(partitionDeletion); +- this.atoms = new AtomIterator(); +- } +- +- public boolean hasNext() +- { +- // Note that we loop on next == null because TombstoneTracker.openNew() could return null below or the atom might be shadowed. +- while (next == null) +- { +- if (atoms.hasNext()) +- { +- // If a range tombstone closes strictly before the next row/RT, we need to return that close (or boundary) marker first. 
+- if (tombstoneTracker.hasClosingMarkerBefore(atoms.peek())) +- { +- next = tombstoneTracker.popClosingMarker(); +- } +- else +- { +- LegacyLayout.LegacyAtom atom = atoms.next(); +- if (!tombstoneTracker.isShadowed(atom)) +- next = isRow(atom) ? readRow(atom) : tombstoneTracker.openNew(atom.asRangeTombstone()); +- } +- } +- else if (tombstoneTracker.hasOpenTombstones()) +- { +- next = tombstoneTracker.popClosingMarker(); +- } +- else +- { +- return false; +- } +- } +- return true; +- } +- +- private Unfiltered readRow(LegacyLayout.LegacyAtom first) +- { +- LegacyLayout.CellGrouper grouper = first.isStatic() +- ? LegacyLayout.CellGrouper.staticGrouper(metadata, helper) +- : this.grouper; +- grouper.reset(); +- grouper.addAtom(first); +- // As long as atoms are part of the same row, consume them. Note that the call to addAtom() uses +- // atoms.peek() so that the atom is only consumed (by next) if it's part of the row (addAtom returns true) +- while (atoms.hasNext() && grouper.addAtom(atoms.peek())) +- { +- atoms.next(); +- } +- return grouper.getRow(); +- } +- +- public Unfiltered next() +- { +- if (!hasNext()) +- throw new UnsupportedOperationException(); +- Unfiltered toReturn = next; +- next = null; +- return toReturn; +- } +- +- public Unfiltered peek() +- { +- if (!hasNext()) +- throw new UnsupportedOperationException(); +- return next; +- } +- +- public void clearState() +- { +- atoms.clearState(); +- tombstoneTracker.clearState(); +- next = null; +- } +- +- public void remove() +- { +- throw new UnsupportedOperationException(); +- } +- } +- +- // Wraps the input of the deserializer to provide an iterator (and skip shadowed atoms). +- // Note: this could use guava AbstractIterator except that we want to be able to clear +- // the internal state of the iterator so it's cleaner to do it ourselves. 
+- private class AtomIterator implements PeekingIterator +- { +- private boolean isDone; +- private LegacyLayout.LegacyAtom next; +- +- private AtomIterator() +- { +- } +- +- public boolean hasNext() +- { +- if (isDone) +- return false; +- +- if (next == null) +- { +- next = readAtom(); +- if (next == null) +- { +- isDone = true; +- return false; +- } +- } +- return true; +- } +- +- private LegacyLayout.LegacyAtom readAtom() +- { +- try +- { +- return LegacyLayout.readLegacyAtom(metadata, in, readAllAsDynamic); +- } +- catch (IOException e) +- { +- throw new IOError(e); +- } +- } +- +- public LegacyLayout.LegacyAtom next() +- { +- if (!hasNext()) +- throw new UnsupportedOperationException(); +- LegacyLayout.LegacyAtom toReturn = next; +- next = null; +- return toReturn; +- } +- +- public LegacyLayout.LegacyAtom peek() +- { +- if (!hasNext()) +- throw new UnsupportedOperationException(); +- return next; +- } +- +- public void clearState() +- { +- this.next = null; +- this.isDone = false; +- } +- +- public void remove() +- { +- throw new UnsupportedOperationException(); +- } +- } +- +- /** +- * Tracks which range tombstones are open when deserializing the old format. +- */ +- private class TombstoneTracker +- { +- private final DeletionTime partitionDeletion; +- +- // Open tombstones sorted by their closing bound (i.e. first tombstone is the first to close). +- // As we only track non-fully-shadowed ranges, the first range is necessarily the currently +- // open tombstone (the one with the higher timestamp). +- private final SortedSet openTombstones; +- +- public TombstoneTracker(DeletionTime partitionDeletion) +- { +- this.partitionDeletion = partitionDeletion; +- this.openTombstones = new TreeSet<>((rt1, rt2) -> metadata.comparator.compare(rt1.stop.bound, rt2.stop.bound)); +- } +- +- /** +- * Checks if the provided atom is fully shadowed by the open tombstones of this tracker (or the partition deletion). 
+- */ +- public boolean isShadowed(LegacyLayout.LegacyAtom atom) +- { +- assert !hasClosingMarkerBefore(atom); +- long timestamp = atom.isCell() ? atom.asCell().timestamp : atom.asRangeTombstone().deletionTime.markedForDeleteAt(); +- +- if (partitionDeletion.deletes(timestamp)) +- return true; +- +- SortedSet coveringTombstones = isRow(atom) ? openTombstones : openTombstones.tailSet(atom.asRangeTombstone()); +- return Iterables.any(coveringTombstones, tombstone -> tombstone.deletionTime.deletes(timestamp)); +- } +- +- /** +- * Whether the currently open marker closes stricly before the provided row/RT. +- */ +- public boolean hasClosingMarkerBefore(LegacyLayout.LegacyAtom atom) +- { +- return !openTombstones.isEmpty() +- && metadata.comparator.compare(openTombstones.first().stop.bound, atom.clustering()) < 0; +- } +- +- /** +- * Returns the unfiltered corresponding to closing the currently open marker (and update the tracker accordingly). +- */ +- public Unfiltered popClosingMarker() +- { +- assert !openTombstones.isEmpty(); +- +- Iterator iter = openTombstones.iterator(); +- LegacyLayout.LegacyRangeTombstone first = iter.next(); +- iter.remove(); +- +- // If that was the last open tombstone, we just want to close it. Otherwise, we have a boundary with the +- // next tombstone +- if (!iter.hasNext()) +- return new RangeTombstoneBoundMarker(first.stop.bound, first.deletionTime); +- +- LegacyLayout.LegacyRangeTombstone next = iter.next(); +- return RangeTombstoneBoundaryMarker.makeBoundary(false, first.stop.bound, first.stop.bound.invert(), first.deletionTime, next.deletionTime); +- } +- +- /** +- * Update the tracker given the provided newly open tombstone. This return the Unfiltered corresponding to the opening +- * of said tombstone: this can be a simple open mark, a boundary (if there was an open tombstone superseded by this new one) +- * or even null (if the new tombston start is supersedes by the currently open tombstone). 
+- * +- * Note that this method assume the added tombstone is not fully shadowed, i.e. that !isShadowed(tombstone). It also +- * assumes no opened tombstone closes before that tombstone (so !hasClosingMarkerBefore(tombstone)). +- */ +- public Unfiltered openNew(LegacyLayout.LegacyRangeTombstone tombstone) +- { +- if (openTombstones.isEmpty()) +- { +- openTombstones.add(tombstone); +- return new RangeTombstoneBoundMarker(tombstone.start.bound, tombstone.deletionTime); +- } +- +- Iterator iter = openTombstones.iterator(); +- LegacyLayout.LegacyRangeTombstone first = iter.next(); +- if (tombstone.deletionTime.supersedes(first.deletionTime)) +- { +- // We're supperseding the currently open tombstone, so we should produce a boundary that close the currently open +- // one and open the new one. We should also add the tombstone, but if it stop after the first one, we should +- // also remove that first tombstone as it won't be useful anymore. +- if (metadata.comparator.compare(tombstone.stop.bound, first.stop.bound) >= 0) +- iter.remove(); +- +- openTombstones.add(tombstone); +- return RangeTombstoneBoundaryMarker.makeBoundary(false, tombstone.start.bound.invert(), tombstone.start.bound, first.deletionTime, tombstone.deletionTime); +- } +- else +- { +- // If the new tombstone don't supersedes the currently open tombstone, we don't have anything to return, we +- // just add the new tombstone (because we know tombstone is not fully shadowed, this imply the new tombstone +- // simply extend after the first one and we'll deal with it later) +- assert metadata.comparator.compare(tombstone.start.bound, first.stop.bound) > 0; +- openTombstones.add(tombstone); +- return null; +- } +- } +- +- public boolean hasOpenTombstones() +- { +- return !openTombstones.isEmpty(); +- } +- +- private boolean formBoundary(LegacyLayout.LegacyRangeTombstone close, LegacyLayout.LegacyRangeTombstone open) +- { +- return metadata.comparator.compare(close.stop.bound, open.start.bound) == 0; +- } +- +- 
public void clearState() +- { +- openTombstones.clear(); +- } +- } +- } + } +diff --git a/src/java/org/apache/cassandra/db/columniterator/AbstractSSTableIterator.java b/src/java/org/apache/cassandra/db/columniterator/AbstractSSTableIterator.java +index 5959a20..36b0ff8 100644 +--- a/src/java/org/apache/cassandra/db/columniterator/AbstractSSTableIterator.java ++++ b/src/java/org/apache/cassandra/db/columniterator/AbstractSSTableIterator.java +@@ -45,8 +45,6 @@ public abstract class AbstractSSTableIterator implements UnfilteredRowIterator + protected final Row staticRow; + protected final Reader reader; + +- private final boolean isForThrift; +- + protected final SegmentedFile ifile; + + private boolean isClosed; +@@ -61,7 +59,6 @@ public abstract class AbstractSSTableIterator implements UnfilteredRowIterator + RowIndexEntry indexEntry, + Slices slices, + ColumnFilter columnFilter, +- boolean isForThrift, + SegmentedFile ifile) + { + this.sstable = sstable; +@@ -70,7 +67,6 @@ public abstract class AbstractSSTableIterator implements UnfilteredRowIterator + this.columns = columnFilter; + this.slices = slices; + this.helper = new SerializationHelper(sstable.metadata, sstable.descriptor.version.correspondingMessagingVersion(), SerializationHelper.Flag.LOCAL, columnFilter); +- this.isForThrift = isForThrift; + + if (indexEntry == null) + { +@@ -93,7 +89,7 @@ public abstract class AbstractSSTableIterator implements UnfilteredRowIterator + // but readStaticRow have already read them and might in fact have consumed the whole partition (when reading + // the legacy file format), so set the reader to null so we don't try to read anything more. 
We can remove this + // once we drop support for the legacy file format +- boolean needsReader = sstable.descriptor.version.storeRows() || isForThrift || !sstable.metadata.isStaticCompactTable(); ++ boolean needsReader = sstable.descriptor.version.storeRows() || !sstable.metadata.isStaticCompactTable(); + + if (needSeekAtPartitionStart) + { +@@ -109,7 +105,7 @@ public abstract class AbstractSSTableIterator implements UnfilteredRowIterator + // Note that this needs to be called after file != null and after the partitionDeletion has been set, but before readStaticRow + // (since it uses it) so we can't move that up (but we'll be able to simplify as soon as we drop support for the old file format). + this.reader = needsReader ? createReader(indexEntry, file, shouldCloseFile) : null; +- this.staticRow = readStaticRow(sstable, file, helper, columns.fetchedColumns().statics, isForThrift, reader == null ? null : reader.deserializer); ++ this.staticRow = readStaticRow(sstable, file, helper, columns.fetchedColumns().statics, reader == null ? null : reader.deserializer); + } + else + { +@@ -165,36 +161,8 @@ public abstract class AbstractSSTableIterator implements UnfilteredRowIterator + FileDataInput file, + SerializationHelper helper, + Columns statics, +- boolean isForThrift, + UnfilteredDeserializer deserializer) throws IOException + { +- if (!sstable.descriptor.version.storeRows()) +- { +- if (!sstable.metadata.isCompactTable()) +- { +- assert deserializer != null; +- return deserializer.hasNext() && deserializer.nextIsStatic() +- ? (Row)deserializer.readNext() +- : Rows.EMPTY_STATIC_ROW; +- } +- +- // For compact tables, we use statics for the "column_metadata" definition. However, in the old format, those +- // "column_metadata" are intermingled as any other "cell". In theory, this means that we'd have to do a first +- // pass to extract the static values. 
However, for thrift, we'll use the ThriftResultsMerger right away which +- // will re-merge static values with dynamic ones, so we can just ignore static and read every cell as a +- // "dynamic" one. For CQL, if the table is a "static compact", then is has only static columns exposed and no +- // dynamic ones. So we do a pass to extract static columns here, but will have no more work to do. Otherwise, +- // the table won't have static columns. +- if (statics.isEmpty() || isForThrift) +- return Rows.EMPTY_STATIC_ROW; +- +- assert sstable.metadata.isStaticCompactTable(); +- +- // As said above, if it's a CQL query and the table is a "static compact", the only exposed columns are the +- // static ones. So we don't have to mark the position to seek back later. +- return LegacyLayout.extractStaticColumns(sstable.metadata, file, statics); +- } +- + if (!sstable.header.hasStatic()) + return Rows.EMPTY_STATIC_ROW; + +@@ -345,7 +313,7 @@ public abstract class AbstractSSTableIterator implements UnfilteredRowIterator + private void createDeserializer() + { + assert file != null && deserializer == null; +- deserializer = UnfilteredDeserializer.create(sstable.metadata, file, sstable.header, helper, partitionLevelDeletion, isForThrift); ++ deserializer = UnfilteredDeserializer.create(sstable.metadata, file, sstable.header, helper); + } + + protected void seekToPosition(long position) throws IOException +diff --git a/src/java/org/apache/cassandra/db/columniterator/SSTableIterator.java b/src/java/org/apache/cassandra/db/columniterator/SSTableIterator.java +index 56ba8f4..9577081 100644 +--- a/src/java/org/apache/cassandra/db/columniterator/SSTableIterator.java ++++ b/src/java/org/apache/cassandra/db/columniterator/SSTableIterator.java +@@ -43,10 +43,9 @@ public class SSTableIterator extends AbstractSSTableIterator + RowIndexEntry indexEntry, + Slices slices, + ColumnFilter columns, +- boolean isForThrift, + SegmentedFile ifile) + { +- super(sstable, file, key, indexEntry, slices, 
columns, isForThrift, ifile); ++ super(sstable, file, key, indexEntry, slices, columns, ifile); + } + + protected Reader createReaderInternal(RowIndexEntry indexEntry, FileDataInput file, boolean shouldCloseFile) +diff --git a/src/java/org/apache/cassandra/db/columniterator/SSTableReversedIterator.java b/src/java/org/apache/cassandra/db/columniterator/SSTableReversedIterator.java +index b8b6f76..b4618fb 100644 +--- a/src/java/org/apache/cassandra/db/columniterator/SSTableReversedIterator.java ++++ b/src/java/org/apache/cassandra/db/columniterator/SSTableReversedIterator.java +@@ -46,10 +46,9 @@ public class SSTableReversedIterator extends AbstractSSTableIterator + RowIndexEntry indexEntry, + Slices slices, + ColumnFilter columns, +- boolean isForThrift, + SegmentedFile ifile) + { +- super(sstable, file, key, indexEntry, slices, columns, isForThrift, ifile); ++ super(sstable, file, key, indexEntry, slices, columns, ifile); + } + + protected Reader createReaderInternal(RowIndexEntry indexEntry, FileDataInput file, boolean shouldCloseFile) +diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionIterator.java b/src/java/org/apache/cassandra/db/compaction/CompactionIterator.java +index 0111aec..05915a8 100644 +--- a/src/java/org/apache/cassandra/db/compaction/CompactionIterator.java ++++ b/src/java/org/apache/cassandra/db/compaction/CompactionIterator.java +@@ -101,15 +101,9 @@ public class CompactionIterator extends CompactionInfo.Holder implements Unfilte + metrics.beginCompaction(this); + + UnfilteredPartitionIterator merged = scanners.isEmpty() +- ? EmptyIterators.unfilteredPartition(controller.cfs.metadata, false) ++ ? 
EmptyIterators.unfilteredPartition(controller.cfs.metadata) + : UnfilteredPartitionIterators.merge(scanners, nowInSec, listener()); +- boolean isForThrift = merged.isForThrift(); // to stop capture of iterator in Purger, which is confusing for debug +- this.compacted = Transformation.apply(merged, new Purger(isForThrift, controller, nowInSec)); +- } +- +- public boolean isForThrift() +- { +- return false; ++ this.compacted = Transformation.apply(merged, new Purger(controller, nowInSec)); + } + + public CFMetaData metadata() +@@ -270,9 +264,9 @@ public class CompactionIterator extends CompactionInfo.Holder implements Unfilte + + private long compactedUnfiltered; + +- private Purger(boolean isForThrift, CompactionController controller, int nowInSec) ++ private Purger(CompactionController controller, int nowInSec) + { +- super(isForThrift, nowInSec, controller.gcBefore, controller.compactingRepaired() ? Integer.MIN_VALUE : Integer.MAX_VALUE, controller.cfs.getCompactionStrategyManager().onlyPurgeRepairedTombstones()); ++ super(nowInSec, controller.gcBefore, controller.compactingRepaired() ? 
Integer.MIN_VALUE : Integer.MAX_VALUE, controller.cfs.getCompactionStrategyManager().onlyPurgeRepairedTombstones()); + this.controller = controller; + } + +diff --git a/src/java/org/apache/cassandra/db/compaction/LeveledCompactionStrategy.java b/src/java/org/apache/cassandra/db/compaction/LeveledCompactionStrategy.java +index b6ad64c..379ba47 100644 +--- a/src/java/org/apache/cassandra/db/compaction/LeveledCompactionStrategy.java ++++ b/src/java/org/apache/cassandra/db/compaction/LeveledCompactionStrategy.java +@@ -359,11 +359,6 @@ public class LeveledCompactionStrategy extends AbstractCompactionStrategy + } + + +- public boolean isForThrift() +- { +- return false; +- } +- + public CFMetaData metadata() + { + return sstables.get(0).metadata; // The ctor checks we have at least one sstable +diff --git a/src/java/org/apache/cassandra/db/filter/DataLimits.java b/src/java/org/apache/cassandra/db/filter/DataLimits.java +index 85cae0c..5565d34 100644 +--- a/src/java/org/apache/cassandra/db/filter/DataLimits.java ++++ b/src/java/org/apache/cassandra/db/filter/DataLimits.java +@@ -34,8 +34,8 @@ import org.apache.cassandra.utils.ByteBufferUtil; + /** + * Object in charge of tracking if we have fetch enough data for a given query. + * +- * The reason this is not just a simple integer is that Thrift and CQL3 count +- * stuffs in different ways. This is what abstract those differences. ++ * This is more complicated than a single count because we support PER PARTITION ++ * limits, but also due to GROUP BY and paging. + */ + public abstract class DataLimits + { +@@ -68,7 +68,7 @@ public abstract class DataLimits + // partition (see SelectStatement.makeFilter). So an "unbounded" distinct is still actually doing some filtering. 
+ public static final DataLimits DISTINCT_NONE = new CQLLimits(NO_LIMIT, 1, true); + +- public enum Kind { CQL_LIMIT, CQL_PAGING_LIMIT, THRIFT_LIMIT, SUPER_COLUMN_COUNTING_LIMIT } ++ public enum Kind { CQL_LIMIT, CQL_PAGING_LIMIT } + + public static DataLimits cqlLimits(int cqlRowLimit) + { +@@ -94,16 +94,6 @@ public abstract class DataLimits + return CQLLimits.distinct(cqlRowLimit); + } + +- public static DataLimits thriftLimits(int partitionLimit, int cellPerPartitionLimit) +- { +- return new ThriftLimits(partitionLimit, cellPerPartitionLimit); +- } +- +- public static DataLimits superColumnCountingLimits(int partitionLimit, int cellPerPartitionLimit) +- { +- return new SuperColumnCountingLimits(partitionLimit, cellPerPartitionLimit); +- } +- + public abstract Kind kind(); + + public abstract boolean isUnlimited(); +@@ -130,8 +120,8 @@ public abstract class DataLimits + /** + * The max number of results this limits enforces. + *

    +- * Note that the actual definition of "results" depends a bit: for CQL, it's always rows, but for +- * thrift, it means cells. ++ * Note that the actual definition of "results" depends a bit: for "normal" queries it's a number of rows, ++ * but for GROUP BY queries it's a number of groups. + * + * @return the maximum number of results this limits enforces. + */ +@@ -155,8 +145,7 @@ public abstract class DataLimits + } + + /** +- * Estimate the number of results (the definition of "results" will be rows for CQL queries +- * and partitions for thrift ones) that a full scan of the provided cfs would yield. ++ * Estimate the number of results that a full scan of the provided cfs would yield. + */ + public abstract float estimateTotalResults(ColumnFamilyStore cfs); + +@@ -502,238 +491,6 @@ public abstract class DataLimits + } + } + +- /** +- * Limits used by thrift; this count partition and cells. +- */ +- private static class ThriftLimits extends DataLimits +- { +- protected final int partitionLimit; +- protected final int cellPerPartitionLimit; +- +- private ThriftLimits(int partitionLimit, int cellPerPartitionLimit) +- { +- this.partitionLimit = partitionLimit; +- this.cellPerPartitionLimit = cellPerPartitionLimit; +- } +- +- public Kind kind() +- { +- return Kind.THRIFT_LIMIT; +- } +- +- public boolean isUnlimited() +- { +- return partitionLimit == NO_LIMIT && cellPerPartitionLimit == NO_LIMIT; +- } +- +- public boolean isDistinct() +- { +- return false; +- } +- +- public DataLimits forPaging(int pageSize) +- { +- // We don't support paging on thrift in general but do use paging under the hood for get_count. For +- // that case, we only care about limiting cellPerPartitionLimit (since it's paging over a single +- // partition). We do check that the partition limit is 1 however to make sure this is not misused +- // (as this wouldn't work properly for range queries). 
+- assert partitionLimit == 1; +- return new ThriftLimits(partitionLimit, pageSize); +- } +- +- public DataLimits forPaging(int pageSize, ByteBuffer lastReturnedKey, int lastReturnedKeyRemaining) +- { +- throw new UnsupportedOperationException(); +- } +- +- public DataLimits forShortReadRetry(int toFetch) +- { +- // Short read retries are always done for a single partition at a time, so it's ok to ignore the +- // partition limit for those +- return new ThriftLimits(1, toFetch); +- } +- +- public boolean hasEnoughLiveData(CachedPartition cached, int nowInSec) +- { +- // We want the number of cells that are currently live. Getting that precise number forces +- // us to iterate the cached partition in general, but we can avoid that if: +- // - The number of non-expiring live cells is greater than the number of cells asked (we then +- // know we have enough live cells). +- // - The number of cells cached is less than requested, in which case we know we won't have enough. +- if (cached.nonExpiringLiveCells() >= cellPerPartitionLimit) +- return true; +- +- if (cached.nonTombstoneCellCount() < cellPerPartitionLimit) +- return false; +- +- // Otherwise, we need to re-count +- DataLimits.Counter counter = newCounter(nowInSec, false); +- try (UnfilteredRowIterator cacheIter = cached.unfilteredIterator(ColumnFilter.selection(cached.columns()), Slices.ALL, false); +- UnfilteredRowIterator iter = counter.applyTo(cacheIter)) +- { +- // Consume the iterator until we've counted enough +- while (iter.hasNext()) +- iter.next(); +- return counter.isDone(); +- } +- } +- +- public Counter newCounter(int nowInSec, boolean assumeLiveData) +- { +- return new ThriftCounter(nowInSec, assumeLiveData); +- } +- +- public int count() +- { +- return partitionLimit * cellPerPartitionLimit; +- } +- +- public int perPartitionCount() +- { +- return cellPerPartitionLimit; +- } +- +- public float estimateTotalResults(ColumnFamilyStore cfs) +- { +- // remember that getMeansColumns returns a number of 
cells: we should clean nomenclature +- float cellsPerPartition = ((float) cfs.getMeanColumns()) / cfs.metadata.partitionColumns().regulars.size(); +- return cellsPerPartition * cfs.estimateKeys(); +- } +- +- protected class ThriftCounter extends Counter +- { +- protected final int nowInSec; +- protected final boolean assumeLiveData; +- +- protected int partitionsCounted; +- protected int cellsCounted; +- protected int cellsInCurrentPartition; +- +- public ThriftCounter(int nowInSec, boolean assumeLiveData) +- { +- this.nowInSec = nowInSec; +- this.assumeLiveData = assumeLiveData; +- } +- +- @Override +- public void applyToPartition(DecoratedKey partitionKey, Row staticRow) +- { +- cellsInCurrentPartition = 0; +- if (!staticRow.isEmpty()) +- applyToRow(staticRow); +- } +- +- @Override +- public Row applyToRow(Row row) +- { +- for (Cell cell : row.cells()) +- { +- if (assumeLiveData || cell.isLive(nowInSec)) +- { +- ++cellsCounted; +- if (++cellsInCurrentPartition >= cellPerPartitionLimit) +- stopInPartition(); +- } +- } +- return row; +- } +- +- @Override +- public void onPartitionClose() +- { +- if (++partitionsCounted >= partitionLimit) +- stop(); +- super.onPartitionClose(); +- } +- +- public int counted() +- { +- return cellsCounted; +- } +- +- public int countedInCurrentPartition() +- { +- return cellsInCurrentPartition; +- } +- +- public boolean isDone() +- { +- return partitionsCounted >= partitionLimit; +- } +- +- public boolean isDoneForPartition() +- { +- return isDone() || cellsInCurrentPartition >= cellPerPartitionLimit; +- } +- } +- +- @Override +- public String toString() +- { +- // This is not valid CQL, but that's ok since it's not used for CQL queries. +- return String.format("THRIFT LIMIT (partitions=%d, cells_per_partition=%d)", partitionLimit, cellPerPartitionLimit); +- } +- } +- +- /** +- * Limits used for thrift get_count when we only want to count super columns. 
+- */ +- private static class SuperColumnCountingLimits extends ThriftLimits +- { +- private SuperColumnCountingLimits(int partitionLimit, int cellPerPartitionLimit) +- { +- super(partitionLimit, cellPerPartitionLimit); +- } +- +- public Kind kind() +- { +- return Kind.SUPER_COLUMN_COUNTING_LIMIT; +- } +- +- public DataLimits forPaging(int pageSize) +- { +- // We don't support paging on thrift in general but do use paging under the hood for get_count. For +- // that case, we only care about limiting cellPerPartitionLimit (since it's paging over a single +- // partition). We do check that the partition limit is 1 however to make sure this is not misused +- // (as this wouldn't work properly for range queries). +- assert partitionLimit == 1; +- return new SuperColumnCountingLimits(partitionLimit, pageSize); +- } +- +- public DataLimits forShortReadRetry(int toFetch) +- { +- // Short read retries are always done for a single partition at a time, so it's ok to ignore the +- // partition limit for those +- return new SuperColumnCountingLimits(1, toFetch); +- } +- +- public Counter newCounter(int nowInSec, boolean assumeLiveData) +- { +- return new SuperColumnCountingCounter(nowInSec, assumeLiveData); +- } +- +- protected class SuperColumnCountingCounter extends ThriftCounter +- { +- public SuperColumnCountingCounter(int nowInSec, boolean assumeLiveData) +- { +- super(nowInSec, assumeLiveData); +- } +- +- @Override +- public Row applyToRow(Row row) +- { +- // In the internal format, a row == a super column, so that's what we want to count. 
+- if (assumeLiveData || row.hasLiveData(nowInSec)) +- { +- ++cellsCounted; +- if (++cellsInCurrentPartition >= cellPerPartitionLimit) +- stopInPartition(); +- } +- return row; +- } +- } +- } +- + public static class Serializer + { + public void serialize(DataLimits limits, DataOutputPlus out, int version) throws IOException +@@ -754,12 +511,6 @@ public abstract class DataLimits + out.writeUnsignedVInt(pagingLimits.lastReturnedKeyRemaining); + } + break; +- case THRIFT_LIMIT: +- case SUPER_COLUMN_COUNTING_LIMIT: +- ThriftLimits thriftLimits = (ThriftLimits)limits; +- out.writeUnsignedVInt(thriftLimits.partitionLimit); +- out.writeUnsignedVInt(thriftLimits.cellPerPartitionLimit); +- break; + } + } + +@@ -779,13 +530,6 @@ public abstract class DataLimits + ByteBuffer lastKey = ByteBufferUtil.readWithVIntLength(in); + int lastRemaining = (int)in.readUnsignedVInt(); + return new CQLPagingLimits(rowLimit, perPartitionLimit, isDistinct, lastKey, lastRemaining); +- case THRIFT_LIMIT: +- case SUPER_COLUMN_COUNTING_LIMIT: +- int partitionLimit = (int)in.readUnsignedVInt(); +- int cellPerPartitionLimit = (int)in.readUnsignedVInt(); +- return kind == Kind.THRIFT_LIMIT +- ? 
new ThriftLimits(partitionLimit, cellPerPartitionLimit) +- : new SuperColumnCountingLimits(partitionLimit, cellPerPartitionLimit); + } + throw new AssertionError(); + } +@@ -808,12 +552,6 @@ public abstract class DataLimits + size += TypeSizes.sizeofUnsignedVInt(pagingLimits.lastReturnedKeyRemaining); + } + break; +- case THRIFT_LIMIT: +- case SUPER_COLUMN_COUNTING_LIMIT: +- ThriftLimits thriftLimits = (ThriftLimits)limits; +- size += TypeSizes.sizeofUnsignedVInt(thriftLimits.partitionLimit); +- size += TypeSizes.sizeofUnsignedVInt(thriftLimits.cellPerPartitionLimit); +- break; + default: + throw new AssertionError(); + } +diff --git a/src/java/org/apache/cassandra/db/filter/RowFilter.java b/src/java/org/apache/cassandra/db/filter/RowFilter.java +index 6626275..baa2aff 100644 +--- a/src/java/org/apache/cassandra/db/filter/RowFilter.java ++++ b/src/java/org/apache/cassandra/db/filter/RowFilter.java +@@ -82,11 +82,6 @@ public abstract class RowFilter implements Iterable + return new CQLFilter(new ArrayList<>(capacity)); + } + +- public static RowFilter forThrift(int capacity) +- { +- return new ThriftFilter(new ArrayList<>(capacity)); +- } +- + public SimpleExpression add(ColumnDefinition def, Operator op, ByteBuffer value) + { + SimpleExpression expression = new SimpleExpression(def, op, value); +@@ -99,12 +94,6 @@ public abstract class RowFilter implements Iterable + add(new MapEqualityExpression(def, key, op, value)); + } + +- public void addThriftExpression(CFMetaData metadata, ByteBuffer name, Operator op, ByteBuffer value) +- { +- assert (this instanceof ThriftFilter); +- add(new ThriftExpression(metadata, name, op, value)); +- } +- + public void addCustomIndexExpression(CFMetaData cfm, IndexMetadata targetIndex, ByteBuffer value) + { + add(new CustomExpression(cfm, targetIndex, value)); +@@ -234,20 +223,6 @@ public abstract class RowFilter implements Iterable + return expressions.iterator(); + } + +- private static Clustering makeCompactClustering(CFMetaData 
metadata, ByteBuffer name) +- { +- assert metadata.isCompactTable(); +- if (metadata.isCompound()) +- { +- List values = CompositeType.splitName(name); +- return Clustering.make(values.toArray(new ByteBuffer[metadata.comparator.size()])); +- } +- else +- { +- return Clustering.make(name); +- } +- } +- + @Override + public String toString() + { +@@ -315,57 +290,15 @@ public abstract class RowFilter implements Iterable + } + } + +- private static class ThriftFilter extends RowFilter +- { +- private ThriftFilter(List expressions) +- { +- super(expressions); +- } +- +- public UnfilteredPartitionIterator filter(UnfilteredPartitionIterator iter, final int nowInSec) +- { +- if (expressions.isEmpty()) +- return iter; +- +- class IsSatisfiedThriftFilter extends Transformation +- { +- @Override +- public UnfilteredRowIterator applyToPartition(UnfilteredRowIterator iter) +- { +- // Thrift does not filter rows, it filters entire partition if any of the expression is not +- // satisfied, which forces us to materialize the result (in theory we could materialize only +- // what we need which might or might not be everything, but we keep it simple since in practice +- // it's not worth that it has ever been). +- ImmutableBTreePartition result = ImmutableBTreePartition.create(iter); +- iter.close(); +- +- // The partition needs to have a row for every expression, and the expression needs to be valid. 
+- for (Expression expr : expressions) +- { +- assert expr instanceof ThriftExpression; +- Row row = result.getRow(makeCompactClustering(iter.metadata(), expr.column().name.bytes)); +- if (row == null || !expr.isSatisfiedBy(iter.metadata(), iter.partitionKey(), row)) +- return null; +- } +- // If we get there, it means all expressions where satisfied, so return the original result +- return result.unfilteredIterator(); +- } +- } +- return Transformation.apply(iter, new IsSatisfiedThriftFilter()); +- } +- +- protected RowFilter withNewExpressions(List expressions) +- { +- return new ThriftFilter(expressions); +- } +- } +- + public static abstract class Expression + { + private static final Serializer serializer = new Serializer(); + +- // Note: the order of this enum matter, it's used for serialization +- protected enum Kind { SIMPLE, MAP_EQUALITY, THRIFT_DYN_EXPR, CUSTOM, USER } ++ // Note: the order of this enum matter, it's used for serialization, ++ // and this is why we have some UNUSEDX for values we don't use anymore ++ // (we could clean those on a major protocol update, but it's not worth ++ // the trouble for now) ++ protected enum Kind { SIMPLE, MAP_EQUALITY, UNUSED1, CUSTOM, USER } + + protected abstract Kind kind(); + protected final ColumnDefinition column; +@@ -538,9 +471,6 @@ public abstract class RowFilter implements Iterable + ByteBufferUtil.writeWithShortLength(mexpr.value, out); + } + break; +- case THRIFT_DYN_EXPR: +- ByteBufferUtil.writeWithShortLength(((ThriftExpression)expression).value, out); +- break; + } + } + +@@ -576,9 +506,7 @@ public abstract class RowFilter implements Iterable + + if (version < MessagingService.VERSION_30) + { +- if (column == null) +- kind = Kind.THRIFT_DYN_EXPR; +- else if (column.type instanceof MapType && operator == Operator.EQ) ++ if (column.type instanceof MapType && operator == Operator.EQ) + kind = Kind.MAP_EQUALITY; + else + kind = Kind.SIMPLE; +@@ -603,8 +531,6 @@ public abstract class RowFilter implements 
Iterable + value = ByteBufferUtil.readWithShortLength(in); + } + return new MapEqualityExpression(column, key, operator, value); +- case THRIFT_DYN_EXPR: +- return new ThriftExpression(metadata, name, operator, ByteBufferUtil.readWithShortLength(in)); + } + throw new AssertionError(); + } +@@ -636,9 +562,6 @@ public abstract class RowFilter implements Iterable + size += ByteBufferUtil.serializedSizeWithShortLength(mexpr.key) + + ByteBufferUtil.serializedSizeWithShortLength(mexpr.value); + break; +- case THRIFT_DYN_EXPR: +- size += ByteBufferUtil.serializedSizeWithShortLength(((ThriftExpression)expression).value); +- break; + case CUSTOM: + if (version >= MessagingService.VERSION_30) + size += IndexMetadata.serializer.serializedSize(((CustomExpression)expression).targetIndex, version) +@@ -899,54 +822,6 @@ public abstract class RowFilter implements Iterable + } + + /** +- * An expression of the form 'name' = 'value', but where 'name' is actually the +- * clustering value for a compact table. This is only for thrift. +- */ +- private static class ThriftExpression extends Expression +- { +- public ThriftExpression(CFMetaData metadata, ByteBuffer name, Operator operator, ByteBuffer value) +- { +- super(makeDefinition(metadata, name), operator, value); +- assert metadata.isCompactTable(); +- } +- +- private static ColumnDefinition makeDefinition(CFMetaData metadata, ByteBuffer name) +- { +- ColumnDefinition def = metadata.getColumnDefinition(name); +- if (def != null) +- return def; +- +- // In thrift, we actually allow expression on non-defined columns for the sake of filtering. To accomodate +- // this we create a "fake" definition. This is messy but it works so is probably good enough. 
+- return ColumnDefinition.regularDef(metadata, name, metadata.compactValueColumn().type); +- } +- +- public boolean isSatisfiedBy(CFMetaData metadata, DecoratedKey partitionKey, Row row) +- { +- assert value != null; +- +- // On thrift queries, even if the column expression is a "static" one, we'll have convert it as a "dynamic" +- // one in ThriftResultsMerger, so we always expect it to be a dynamic one. Further, we expect this is only +- // called when the row clustering does match the column (see ThriftFilter above). +- assert row.clustering().equals(makeCompactClustering(metadata, column.name.bytes)); +- Cell cell = row.getCell(metadata.compactValueColumn()); +- return cell != null && operator.isSatisfiedBy(column.type, cell.value(), value); +- } +- +- @Override +- public String toString() +- { +- return String.format("%s %s %s", column.name, operator, column.type.getString(value)); +- } +- +- @Override +- protected Kind kind() +- { +- return Kind.THRIFT_DYN_EXPR; +- } +- } +- +- /** + * A custom index expression for use with 2i implementations which support custom syntax and which are not + * necessarily linked to a single column in the base table. 
+ */ +@@ -1100,7 +975,7 @@ public abstract class RowFilter implements Iterable + { + public void serialize(RowFilter filter, DataOutputPlus out, int version) throws IOException + { +- out.writeBoolean(filter instanceof ThriftFilter); ++ out.writeBoolean(false); // Old "is for thrift" boolean + out.writeUnsignedVInt(filter.expressions.size()); + for (Expression expr : filter.expressions) + Expression.serializer.serialize(expr, out, version); +@@ -1109,20 +984,18 @@ public abstract class RowFilter implements Iterable + + public RowFilter deserialize(DataInputPlus in, int version, CFMetaData metadata) throws IOException + { +- boolean forThrift = in.readBoolean(); ++ in.readBoolean(); // Unused + int size = (int)in.readUnsignedVInt(); + List expressions = new ArrayList<>(size); + for (int i = 0; i < size; i++) + expressions.add(Expression.serializer.deserialize(in, version, metadata)); + +- return forThrift +- ? new ThriftFilter(expressions) +- : new CQLFilter(expressions); ++ return new CQLFilter(expressions); + } + + public long serializedSize(RowFilter filter, int version) + { +- long size = 1 // forThrift ++ long size = 1 // unused boolean + + TypeSizes.sizeofUnsignedVInt(filter.expressions.size()); + for (Expression expr : filter.expressions) + size += Expression.serializer.serializedSize(expr, version); +diff --git a/src/java/org/apache/cassandra/db/marshal/CollectionType.java b/src/java/org/apache/cassandra/db/marshal/CollectionType.java +index 2f5cbb6..6cd190e 100644 +--- a/src/java/org/apache/cassandra/db/marshal/CollectionType.java ++++ b/src/java/org/apache/cassandra/db/marshal/CollectionType.java +@@ -38,7 +38,8 @@ import org.apache.cassandra.utils.ByteBufferUtil; + /** + * The abstract validator that is the base for maps, sets and lists (both frozen and non-frozen). + * +- * Please note that this comparator shouldn't be used "manually" (through thrift for instance). 
++ * Please note that this comparator shouldn't be used "manually" (as a custom ++ * type for instance). + */ + public abstract class CollectionType extends AbstractType + { +diff --git a/src/java/org/apache/cassandra/db/marshal/ColumnToCollectionType.java b/src/java/org/apache/cassandra/db/marshal/ColumnToCollectionType.java +deleted file mode 100644 +index 96efa24..0000000 +--- a/src/java/org/apache/cassandra/db/marshal/ColumnToCollectionType.java ++++ /dev/null +@@ -1,153 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. +- */ +-package org.apache.cassandra.db.marshal; +- +-import java.nio.ByteBuffer; +-import java.util.HashMap; +-import java.util.Map; +- +-import com.google.common.collect.ImmutableMap; +- +-import org.apache.cassandra.cql3.Term; +-import org.apache.cassandra.exceptions.ConfigurationException; +-import org.apache.cassandra.exceptions.SyntaxException; +-import org.apache.cassandra.serializers.TypeSerializer; +-import org.apache.cassandra.serializers.BytesSerializer; +-import org.apache.cassandra.serializers.MarshalException; +-import org.apache.cassandra.utils.ByteBufferUtil; +- +-/* +- * This class is deprecated and only kept for backward compatibility. 
+- */ +-public class ColumnToCollectionType extends AbstractType +-{ +- // interning instances +- private static final Map, ColumnToCollectionType> instances = new HashMap<>(); +- +- public final Map defined; +- +- public static ColumnToCollectionType getInstance(TypeParser parser) throws SyntaxException, ConfigurationException +- { +- return getInstance(parser.getCollectionsParameters()); +- } +- +- public static synchronized ColumnToCollectionType getInstance(Map defined) +- { +- assert defined != null; +- +- ColumnToCollectionType t = instances.get(defined); +- if (t == null) +- { +- t = new ColumnToCollectionType(defined); +- instances.put(defined, t); +- } +- return t; +- } +- +- private ColumnToCollectionType(Map defined) +- { +- super(ComparisonType.CUSTOM); +- this.defined = ImmutableMap.copyOf(defined); +- } +- +- public int compareCustom(ByteBuffer o1, ByteBuffer o2) +- { +- throw new UnsupportedOperationException("ColumnToCollectionType should only be used in composite types, never alone"); +- } +- +- public int compareCollectionMembers(ByteBuffer o1, ByteBuffer o2, ByteBuffer collectionName) +- { +- CollectionType t = defined.get(collectionName); +- if (t == null) +- throw new RuntimeException(ByteBufferUtil.bytesToHex(collectionName) + " is not defined as a collection"); +- +- return t.nameComparator().compare(o1, o2); +- } +- +- public String getString(ByteBuffer bytes) +- { +- return BytesType.instance.getString(bytes); +- } +- +- public ByteBuffer fromString(String source) +- { +- try +- { +- return ByteBufferUtil.hexToBytes(source); +- } +- catch (NumberFormatException e) +- { +- throw new MarshalException(String.format("cannot parse '%s' as hex bytes", source), e); +- } +- } +- +- @Override +- public Term fromJSONObject(Object parsed) throws MarshalException +- { +- throw new UnsupportedOperationException(); +- } +- +- @Override +- public String toJSONString(ByteBuffer buffer, int protocolVersion) +- { +- throw new UnsupportedOperationException(); 
+- } +- +- @Override +- public void validate(ByteBuffer bytes) +- { +- throw new UnsupportedOperationException("ColumnToCollectionType should only be used in composite types, never alone"); +- } +- +- public TypeSerializer getSerializer() +- { +- return BytesSerializer.instance; +- } +- +- public void validateCollectionMember(ByteBuffer bytes, ByteBuffer collectionName) throws MarshalException +- { +- CollectionType t = defined.get(collectionName); +- if (t == null) +- throw new MarshalException(ByteBufferUtil.bytesToHex(collectionName) + " is not defined as a collection"); +- +- t.nameComparator().validate(bytes); +- } +- +- @Override +- public boolean isCompatibleWith(AbstractType previous) +- { +- if (!(previous instanceof ColumnToCollectionType)) +- return false; +- +- ColumnToCollectionType prev = (ColumnToCollectionType)previous; +- // We are compatible if we have all the definitions previous have (but we can have more). +- for (Map.Entry entry : prev.defined.entrySet()) +- { +- CollectionType newType = defined.get(entry.getKey()); +- if (newType == null || !newType.isCompatibleWith(entry.getValue())) +- return false; +- } +- return true; +- } +- +- @Override +- public String toString() +- { +- return getClass().getName() + TypeParser.stringifyCollectionsParameters(defined); +- } +-} +diff --git a/src/java/org/apache/cassandra/db/marshal/CompositeType.java b/src/java/org/apache/cassandra/db/marshal/CompositeType.java +index 8e581b6..8782391 100644 +--- a/src/java/org/apache/cassandra/db/marshal/CompositeType.java ++++ b/src/java/org/apache/cassandra/db/marshal/CompositeType.java +@@ -28,7 +28,6 @@ import java.util.concurrent.ConcurrentMap; + import com.google.common.collect.ImmutableList; + + import org.apache.cassandra.cql3.ColumnIdentifier; +-import org.apache.cassandra.cql3.Operator; + import org.apache.cassandra.exceptions.ConfigurationException; + import org.apache.cassandra.exceptions.SyntaxException; + import 
org.apache.cassandra.io.util.DataOutputBuffer; +@@ -63,12 +62,12 @@ import org.apache.cassandra.utils.ByteBufferUtil; + */ + public class CompositeType extends AbstractCompositeType + { +- public static final int STATIC_MARKER = 0xFFFF; ++ private static final int STATIC_MARKER = 0xFFFF; + + public final List> types; + + // interning instances +- private static final ConcurrentMap>, CompositeType> instances = new ConcurrentHashMap>, CompositeType>(); ++ private static final ConcurrentMap>, CompositeType> instances = new ConcurrentHashMap<>(); + + public static CompositeType getInstance(TypeParser parser) throws ConfigurationException, SyntaxException + { +@@ -77,7 +76,7 @@ public class CompositeType extends AbstractCompositeType + + public static CompositeType getInstance(AbstractType... types) + { +- return getInstance(Arrays.>asList(types)); ++ return getInstance(Arrays.asList(types)); + } + + protected boolean readIsStatic(ByteBuffer bb) +@@ -128,9 +127,8 @@ public class CompositeType extends AbstractCompositeType + } + catch (IndexOutOfBoundsException e) + { +- // We shouldn't get there in general because 1) we shouldn't construct broken composites +- // from CQL and 2) broken composites coming from thrift should be rejected by validate. +- // There is a few cases however where, if the schema has changed since we created/validated ++ // We shouldn't get there in general we shouldn't construct broken composites ++ // but there is a few cases where if the schema has changed since we created/validated + // the composite, this will be thrown (see #6262). Those cases are a user error but + // throwing a more meaningful error message to make understanding such error easier. . + throw new RuntimeException("Cannot get comparator " + i + " in " + this + ". " +@@ -253,13 +251,6 @@ public class CompositeType extends AbstractCompositeType + return list; + } + +- // Extract CQL3 column name from the full column name. 
+- public ByteBuffer extractLastComponent(ByteBuffer bb) +- { +- int idx = types.get(types.size() - 1) instanceof ColumnToCollectionType ? types.size() - 2 : types.size() - 1; +- return extractComponent(bb, idx); +- } +- + public static boolean isStaticName(ByteBuffer bb) + { + return bb.remaining() >= 2 && (ByteBufferUtil.getShortLength(bb, bb.position()) & 0xFFFF) == STATIC_MARKER; +@@ -360,11 +351,6 @@ public class CompositeType extends AbstractCompositeType + return getClass().getName() + TypeParser.stringifyTypeParameters(types); + } + +- public Builder builder() +- { +- return new Builder(this); +- } +- + public static ByteBuffer build(ByteBuffer... buffers) + { + return build(false, buffers); +@@ -392,143 +378,4 @@ public class CompositeType extends AbstractCompositeType + out.flip(); + return out; + } +- +- public static class Builder +- { +- private final CompositeType composite; +- +- private final List components; +- private final byte[] endOfComponents; +- private int serializedSize; +- private final boolean isStatic; +- +- public Builder(CompositeType composite) +- { +- this(composite, new ArrayList(composite.types.size()), new byte[composite.types.size()], false); +- } +- +- public static Builder staticBuilder(CompositeType composite) +- { +- return new Builder(composite, new ArrayList(composite.types.size()), new byte[composite.types.size()], true); +- } +- +- private Builder(CompositeType composite, List components, byte[] endOfComponents, boolean isStatic) +- { +- assert endOfComponents.length == composite.types.size(); +- +- this.composite = composite; +- this.components = components; +- this.endOfComponents = endOfComponents; +- this.isStatic = isStatic; +- if (isStatic) +- serializedSize = 2; +- } +- +- private Builder(Builder b) +- { +- this(b.composite, new ArrayList(b.components), Arrays.copyOf(b.endOfComponents, b.endOfComponents.length), b.isStatic); +- this.serializedSize = b.serializedSize; +- } +- +- public Builder add(ByteBuffer bb) +- 
{ +- if (components.size() >= composite.types.size()) +- throw new IllegalStateException("Composite column is already fully constructed"); +- +- components.add(bb); +- serializedSize += 3 + bb.remaining(); // 2 bytes lenght + 1 byte eoc +- return this; +- } +- +- public Builder add(ColumnIdentifier name) +- { +- return add(name.bytes); +- } +- +- public int componentCount() +- { +- return components.size(); +- } +- +- public int remainingCount() +- { +- return composite.types.size() - components.size(); +- } +- +- public ByteBuffer get(int i) +- { +- return components.get(i); +- } +- +- public ByteBuffer build() +- { +- try (DataOutputBuffer out = new DataOutputBufferFixed(serializedSize)) +- { +- if (isStatic) +- out.writeShort(STATIC_MARKER); +- +- for (int i = 0; i < components.size(); i++) +- { +- ByteBufferUtil.writeWithShortLength(components.get(i), out); +- out.write(endOfComponents[i]); +- } +- return ByteBuffer.wrap(out.getData(), 0, out.getLength()); +- } +- catch (IOException e) +- { +- throw new RuntimeException(e); +- } +- } +- +- public ByteBuffer buildAsEndOfRange() +- { +- if (components.isEmpty()) +- return ByteBufferUtil.EMPTY_BYTE_BUFFER; +- +- ByteBuffer bb = build(); +- bb.put(bb.remaining() - 1, (byte)1); +- return bb; +- } +- +- public ByteBuffer buildForRelation(Operator op) +- { +- /* +- * Given the rules for eoc (end-of-component, see AbstractCompositeType.compare()), +- * We can select: +- * - = 'a' by using <'a'><0> +- * - < 'a' by using <'a'><-1> +- * - <= 'a' by using <'a'><1> +- * - > 'a' by using <'a'><1> +- * - >= 'a' by using <'a'><0> +- */ +- int current = components.size() - 1; +- switch (op) +- { +- case LT: +- endOfComponents[current] = (byte) -1; +- break; +- case GT: +- case LTE: +- endOfComponents[current] = (byte) 1; +- break; +- default: +- endOfComponents[current] = (byte) 0; +- break; +- } +- return build(); +- } +- +- public Builder copy() +- { +- return new Builder(this); +- } +- +- public ByteBuffer getComponent(int 
i) +- { +- if (i >= components.size()) +- throw new IllegalArgumentException(); +- +- return components.get(i); +- } +- } + } +diff --git a/src/java/org/apache/cassandra/db/partitions/CachedBTreePartition.java b/src/java/org/apache/cassandra/db/partitions/CachedBTreePartition.java +index afe1cc3..90d64f4 100644 +--- a/src/java/org/apache/cassandra/db/partitions/CachedBTreePartition.java ++++ b/src/java/org/apache/cassandra/db/partitions/CachedBTreePartition.java +@@ -36,24 +36,17 @@ public class CachedBTreePartition extends ImmutableBTreePartition implements Cac + private final int cachedLiveRows; + private final int rowsWithNonExpiringCells; + +- private final int nonTombstoneCellCount; +- private final int nonExpiringLiveCells; +- + private CachedBTreePartition(CFMetaData metadata, + DecoratedKey partitionKey, + Holder holder, + int createdAtInSec, + int cachedLiveRows, +- int rowsWithNonExpiringCells, +- int nonTombstoneCellCount, +- int nonExpiringLiveCells) ++ int rowsWithNonExpiringCells) + { + super(metadata, partitionKey, holder); + this.createdAtInSec = createdAtInSec; + this.cachedLiveRows = cachedLiveRows; + this.rowsWithNonExpiringCells = rowsWithNonExpiringCells; +- this.nonTombstoneCellCount = nonTombstoneCellCount; +- this.nonExpiringLiveCells = nonExpiringLiveCells; + } + + /** +@@ -89,30 +82,24 @@ public class CachedBTreePartition extends ImmutableBTreePartition implements Cac + + int cachedLiveRows = 0; + int rowsWithNonExpiringCells = 0; +- int nonTombstoneCellCount = 0; +- int nonExpiringLiveCells = 0; + + for (Row row : BTree.iterable(holder.tree)) + { + if (row.hasLiveData(nowInSec)) + ++cachedLiveRows; + +- int nonExpiringLiveCellsThisRow = 0; ++ boolean hasNonExpiringLiveCell = false; + for (Cell cell : row.cells()) + { +- if (!cell.isTombstone()) ++ if (!cell.isTombstone() && !cell.isExpiring()) + { +- ++nonTombstoneCellCount; +- if (!cell.isExpiring()) +- ++nonExpiringLiveCellsThisRow; ++ hasNonExpiringLiveCell = true; ++ break; + } + } + 
+- if (nonExpiringLiveCellsThisRow > 0) +- { ++ if (hasNonExpiringLiveCell) + ++rowsWithNonExpiringCells; +- nonExpiringLiveCells += nonExpiringLiveCellsThisRow; +- } + } + + return new CachedBTreePartition(iterator.metadata(), +@@ -120,9 +107,7 @@ public class CachedBTreePartition extends ImmutableBTreePartition implements Cac + holder, + nowInSec, + cachedLiveRows, +- rowsWithNonExpiringCells, +- nonTombstoneCellCount, +- nonExpiringLiveCells); ++ rowsWithNonExpiringCells); + } + + /** +@@ -153,16 +138,6 @@ public class CachedBTreePartition extends ImmutableBTreePartition implements Cac + return rowsWithNonExpiringCells; + } + +- public int nonTombstoneCellCount() +- { +- return nonTombstoneCellCount; +- } +- +- public int nonExpiringLiveCells() +- { +- return nonExpiringLiveCells; +- } +- + static class Serializer implements ISerializer + { + public void serialize(CachedPartition partition, DataOutputPlus out) throws IOException +@@ -175,8 +150,6 @@ public class CachedBTreePartition extends ImmutableBTreePartition implements Cac + out.writeInt(p.createdAtInSec); + out.writeInt(p.cachedLiveRows); + out.writeInt(p.rowsWithNonExpiringCells); +- out.writeInt(p.nonTombstoneCellCount); +- out.writeInt(p.nonExpiringLiveCells); + CFMetaData.serializer.serialize(partition.metadata(), out, version); + try (UnfilteredRowIterator iter = p.unfilteredIterator()) + { +@@ -198,8 +171,6 @@ public class CachedBTreePartition extends ImmutableBTreePartition implements Cac + int createdAtInSec = in.readInt(); + int cachedLiveRows = in.readInt(); + int rowsWithNonExpiringCells = in.readInt(); +- int nonTombstoneCellCount = in.readInt(); +- int nonExpiringLiveCells = in.readInt(); + + + CFMetaData metadata = CFMetaData.serializer.deserialize(in, version); +@@ -217,9 +188,7 @@ public class CachedBTreePartition extends ImmutableBTreePartition implements Cac + holder, + createdAtInSec, + cachedLiveRows, +- rowsWithNonExpiringCells, +- nonTombstoneCellCount, +- nonExpiringLiveCells); ++ 
rowsWithNonExpiringCells); + + } + +@@ -235,8 +204,6 @@ public class CachedBTreePartition extends ImmutableBTreePartition implements Cac + return TypeSizes.sizeof(p.createdAtInSec) + + TypeSizes.sizeof(p.cachedLiveRows) + + TypeSizes.sizeof(p.rowsWithNonExpiringCells) +- + TypeSizes.sizeof(p.nonTombstoneCellCount) +- + TypeSizes.sizeof(p.nonExpiringLiveCells) + + CFMetaData.serializer.serializedSize(partition.metadata(), version) + + UnfilteredRowIteratorSerializer.serializer.serializedSize(iter, null, MessagingService.current_version, p.rowCount()); + } +diff --git a/src/java/org/apache/cassandra/db/partitions/CachedPartition.java b/src/java/org/apache/cassandra/db/partitions/CachedPartition.java +index 0cbaba0..6c781f5 100644 +--- a/src/java/org/apache/cassandra/db/partitions/CachedPartition.java ++++ b/src/java/org/apache/cassandra/db/partitions/CachedPartition.java +@@ -71,24 +71,4 @@ public interface CachedPartition extends Partition, IRowCacheEntry + * @return the last row of the partition, or {@code null} if the partition is empty. + */ + public Row lastRow(); +- +- /** +- * The number of {@code cell} objects that are not tombstone in this cached partition. +- * +- * Please note that this is not the number of live cells since +- * some of the cells might be expired. +- * +- * @return the number of non tombstone cells in the partition. +- */ +- public int nonTombstoneCellCount(); +- +- /** +- * The number of cells in this cached partition that are neither tombstone nor expiring. +- * +- * Note that this is generally not a very meaningful number, but this is used by +- * {@link org.apache.cassandra.db.filter.DataLimits#hasEnoughLiveData} as an optimization. +- * +- * @return the number of cells that are neither tombstones nor expiring. 
+- */ +- public int nonExpiringLiveCells(); + } +diff --git a/src/java/org/apache/cassandra/db/partitions/PartitionUpdate.java b/src/java/org/apache/cassandra/db/partitions/PartitionUpdate.java +index b7c167a..87be47c 100644 +--- a/src/java/org/apache/cassandra/db/partitions/PartitionUpdate.java ++++ b/src/java/org/apache/cassandra/db/partitions/PartitionUpdate.java +@@ -780,44 +780,21 @@ public class PartitionUpdate extends AbstractBTreePartition + { + assert !iter.isReverseOrder(); + +- if (version < MessagingService.VERSION_30) +- { +- LegacyLayout.serializeAsLegacyPartition(null, iter, out, version); +- } +- else +- { +- CFMetaData.serializer.serialize(update.metadata(), out, version); +- UnfilteredRowIteratorSerializer.serializer.serialize(iter, null, out, version, update.rowCount()); +- } ++ CFMetaData.serializer.serialize(update.metadata(), out, version); ++ UnfilteredRowIteratorSerializer.serializer.serialize(iter, null, out, version, update.rowCount()); + } + } + + public PartitionUpdate deserialize(DataInputPlus in, int version, SerializationHelper.Flag flag, ByteBuffer key) throws IOException + { +- if (version >= MessagingService.VERSION_30) +- { +- assert key == null; // key is only there for the old format +- return deserialize30(in, version, flag); +- } +- else +- { +- assert key != null; +- return deserializePre30(in, version, flag, key); +- } ++ assert key == null; // key is only there for the old format ++ return deserialize30(in, version, flag); + } + + // Used to share same decorated key between updates. 
+ public PartitionUpdate deserialize(DataInputPlus in, int version, SerializationHelper.Flag flag, DecoratedKey key) throws IOException + { +- if (version >= MessagingService.VERSION_30) +- { + return deserialize30(in, version, flag); +- } +- else +- { +- assert key != null; +- return deserializePre30(in, version, flag, key.getKey()); +- } + } + + private static PartitionUpdate deserialize30(DataInputPlus in, int version, SerializationHelper.Flag flag) throws IOException +@@ -854,22 +831,10 @@ public class PartitionUpdate extends AbstractBTreePartition + false); + } + +- private static PartitionUpdate deserializePre30(DataInputPlus in, int version, SerializationHelper.Flag flag, ByteBuffer key) throws IOException +- { +- try (UnfilteredRowIterator iterator = LegacyLayout.deserializeLegacyPartition(in, version, flag, key)) +- { +- assert iterator != null; // This is only used in mutation, and mutation have never allowed "null" column families +- return PartitionUpdate.fromIterator(iterator, ColumnFilter.all(iterator.metadata())); +- } +- } +- + public long serializedSize(PartitionUpdate update, int version) + { + try (UnfilteredRowIterator iter = update.unfilteredIterator()) + { +- if (version < MessagingService.VERSION_30) +- return LegacyLayout.serializedSizeAsLegacyPartition(null, iter, version); +- + return CFMetaData.serializer.serializedSize(update.metadata(), version) + + UnfilteredRowIteratorSerializer.serializer.serializedSize(iter, null, version, update.rowCount()); + } +diff --git a/src/java/org/apache/cassandra/db/partitions/PurgeFunction.java b/src/java/org/apache/cassandra/db/partitions/PurgeFunction.java +index d3255d3..85b79d2 100644 +--- a/src/java/org/apache/cassandra/db/partitions/PurgeFunction.java ++++ b/src/java/org/apache/cassandra/db/partitions/PurgeFunction.java +@@ -23,14 +23,12 @@ import org.apache.cassandra.db.transform.Transformation; + + public abstract class PurgeFunction extends Transformation + { +- private final boolean isForThrift; 
+ private final DeletionPurger purger; + private final int nowInSec; + private boolean isReverseOrder; + +- public PurgeFunction(boolean isForThrift, int nowInSec, int gcBefore, int oldestUnrepairedTombstone, boolean onlyPurgeRepairedTombstones) ++ public PurgeFunction(int nowInSec, int gcBefore, int oldestUnrepairedTombstone, boolean onlyPurgeRepairedTombstones) + { +- this.isForThrift = isForThrift; + this.nowInSec = nowInSec; + this.purger = (timestamp, localDeletionTime) -> + !(onlyPurgeRepairedTombstones && localDeletionTime >= oldestUnrepairedTombstone) +@@ -62,7 +60,7 @@ public abstract class PurgeFunction extends Transformation + { +- /** +- * Whether that partition iterator is for a thrift queries. +- *

    +- * If this is true, the partition iterator may return some empty UnfilteredRowIterator and those +- * should be preserved as thrift include partitions that "exists" (have some cells even +- * if this are actually deleted) but have nothing matching the query. +- * +- * @return whether the iterator is for a thrift query. +- */ +- public boolean isForThrift(); +- + public CFMetaData metadata(); + } +diff --git a/src/java/org/apache/cassandra/db/partitions/UnfilteredPartitionIterators.java b/src/java/org/apache/cassandra/db/partitions/UnfilteredPartitionIterators.java +index b7f6793..fd85a12 100644 +--- a/src/java/org/apache/cassandra/db/partitions/UnfilteredPartitionIterators.java ++++ b/src/java/org/apache/cassandra/db/partitions/UnfilteredPartitionIterators.java +@@ -93,7 +93,6 @@ public abstract class UnfilteredPartitionIterators + assert listener != null; + assert !iterators.isEmpty(); + +- final boolean isForThrift = iterators.get(0).isForThrift(); + final CFMetaData metadata = iterators.get(0).metadata(); + + final MergeIterator merged = MergeIterator.get(iterators, partitionComparator, new MergeIterator.Reducer() +@@ -135,11 +134,6 @@ public abstract class UnfilteredPartitionIterators + + return new AbstractUnfilteredPartitionIterator() + { +- public boolean isForThrift() +- { +- return isForThrift; +- } +- + public CFMetaData metadata() + { + return metadata; +@@ -170,7 +164,6 @@ public abstract class UnfilteredPartitionIterators + if (iterators.size() == 1) + return iterators.get(0); + +- final boolean isForThrift = iterators.get(0).isForThrift(); + final CFMetaData metadata = iterators.get(0).metadata(); + + final MergeIterator merged = MergeIterator.get(iterators, partitionComparator, new MergeIterator.Reducer() +@@ -201,11 +194,6 @@ public abstract class UnfilteredPartitionIterators + + return new AbstractUnfilteredPartitionIterator() + { +- public boolean isForThrift() +- { +- return isForThrift; +- } +- + public CFMetaData metadata() + { + return 
metadata; +@@ -285,7 +273,9 @@ public abstract class UnfilteredPartitionIterators + { + assert version >= MessagingService.VERSION_30; // We handle backward compatibility directy in ReadResponse.LegacyRangeSliceReplySerializer + +- out.writeBoolean(iter.isForThrift()); ++ // Previously, a boolean indicating if this was for a thrift query. ++ // Unused since 4.0 but kept on wire for compatibility. ++ out.writeBoolean(false); + while (iter.hasNext()) + { + out.writeBoolean(true); +@@ -300,7 +290,8 @@ public abstract class UnfilteredPartitionIterators + public UnfilteredPartitionIterator deserialize(final DataInputPlus in, final int version, final CFMetaData metadata, final ColumnFilter selection, final SerializationHelper.Flag flag) throws IOException + { + assert version >= MessagingService.VERSION_30; // We handle backward compatibility directy in ReadResponse.LegacyRangeSliceReplySerializer +- final boolean isForThrift = in.readBoolean(); ++ // Skip now unused isForThrift boolean ++ in.readBoolean(); + + return new AbstractUnfilteredPartitionIterator() + { +@@ -308,11 +299,6 @@ public abstract class UnfilteredPartitionIterators + private boolean hasNext; + private boolean nextReturned = true; + +- public boolean isForThrift() +- { +- return isForThrift; +- } +- + public CFMetaData metadata() + { + return metadata; +diff --git a/src/java/org/apache/cassandra/db/rows/UnfilteredRowIteratorWithLowerBound.java b/src/java/org/apache/cassandra/db/rows/UnfilteredRowIteratorWithLowerBound.java +index 14730ac..481425e 100644 +--- a/src/java/org/apache/cassandra/db/rows/UnfilteredRowIteratorWithLowerBound.java ++++ b/src/java/org/apache/cassandra/db/rows/UnfilteredRowIteratorWithLowerBound.java +@@ -32,7 +32,6 @@ import org.apache.cassandra.db.filter.ColumnFilter; + import org.apache.cassandra.io.sstable.IndexInfo; + import org.apache.cassandra.io.sstable.format.SSTableReader; + import org.apache.cassandra.io.sstable.metadata.StatsMetadata; +-import 
org.apache.cassandra.thrift.ThriftResultsMerger; + import org.apache.cassandra.utils.IteratorWithLowerBound; + + /** +@@ -48,27 +47,18 @@ public class UnfilteredRowIteratorWithLowerBound extends LazilyInitializedUnfilt + private final SSTableReader sstable; + private final ClusteringIndexFilter filter; + private final ColumnFilter selectedColumns; +- private final boolean isForThrift; +- private final int nowInSec; +- private final boolean applyThriftTransformation; + private ClusteringBound lowerBound; + private boolean firstItemRetrieved; + + public UnfilteredRowIteratorWithLowerBound(DecoratedKey partitionKey, + SSTableReader sstable, + ClusteringIndexFilter filter, +- ColumnFilter selectedColumns, +- boolean isForThrift, +- int nowInSec, +- boolean applyThriftTransformation) ++ ColumnFilter selectedColumns) + { + super(partitionKey); + this.sstable = sstable; + this.filter = filter; + this.selectedColumns = selectedColumns; +- this.isForThrift = isForThrift; +- this.nowInSec = nowInSec; +- this.applyThriftTransformation = applyThriftTransformation; + this.lowerBound = null; + this.firstItemRetrieved = false; + } +@@ -102,10 +92,8 @@ public class UnfilteredRowIteratorWithLowerBound extends LazilyInitializedUnfilt + sstable.incrementReadCount(); + + @SuppressWarnings("resource") // 'iter' is added to iterators which is closed on exception, or through the closing of the final merged iterator +- UnfilteredRowIterator iter = sstable.iterator(partitionKey(), filter.getSlices(metadata()), selectedColumns, filter.isReversed(), isForThrift); +- return isForThrift && applyThriftTransformation +- ? 
ThriftResultsMerger.maybeWrap(iter, nowInSec) +- : iter; ++ UnfilteredRowIterator iter = sstable.iterator(partitionKey(), filter.getSlices(metadata()), selectedColumns, filter.isReversed()); ++ return iter; + } + + @Override +diff --git a/src/java/org/apache/cassandra/db/rows/UnfilteredRowIterators.java b/src/java/org/apache/cassandra/db/rows/UnfilteredRowIterators.java +index 46447ec..d97e980 100644 +--- a/src/java/org/apache/cassandra/db/rows/UnfilteredRowIterators.java ++++ b/src/java/org/apache/cassandra/db/rows/UnfilteredRowIterators.java +@@ -151,12 +151,6 @@ public abstract class UnfilteredRowIterators + */ + public static void digest(ReadCommand command, UnfilteredRowIterator iterator, MessageDigest digest, int version) + { +- if (version < MessagingService.VERSION_30) +- { +- LegacyLayout.fromUnfilteredRowIterator(command, iterator).digest(iterator.metadata(), digest); +- return; +- } +- + digest.update(iterator.partitionKey().getKey().duplicate()); + iterator.partitionLevelDeletion().digest(digest); + iterator.columns().regulars.digest(digest); +diff --git a/src/java/org/apache/cassandra/db/transform/Filter.java b/src/java/org/apache/cassandra/db/transform/Filter.java +index 48c8b1a..729f06b 100644 +--- a/src/java/org/apache/cassandra/db/transform/Filter.java ++++ b/src/java/org/apache/cassandra/db/transform/Filter.java +@@ -25,7 +25,7 @@ import org.apache.cassandra.db.rows.*; + + final class Filter extends Transformation + { +- private final boolean filterEmpty; // generally maps to !isForThrift, but also false for direct row filtration ++ private final boolean filterEmpty; // generally true except for direct row filtration + private final int nowInSec; + public Filter(boolean filterEmpty, int nowInSec) + { +diff --git a/src/java/org/apache/cassandra/db/transform/FilteredPartitions.java b/src/java/org/apache/cassandra/db/transform/FilteredPartitions.java +index 09e36b4..ed643bb 100644 +--- 
a/src/java/org/apache/cassandra/db/transform/FilteredPartitions.java ++++ b/src/java/org/apache/cassandra/db/transform/FilteredPartitions.java +@@ -52,7 +52,7 @@ public final class FilteredPartitions extends BasePartitions implements UnfilteredPartitionIterator + { +- final boolean isForThrift; +- + // wrap an iterator for transformation + public UnfilteredPartitions(UnfilteredPartitionIterator input) + { + super(input); +- this.isForThrift = input.isForThrift(); +- } +- +- public boolean isForThrift() +- { +- return isForThrift; + } + + public CFMetaData metadata() +diff --git a/src/java/org/apache/cassandra/hadoop/ConfigHelper.java b/src/java/org/apache/cassandra/hadoop/ConfigHelper.java +index a4deb4a..2e49207 100644 +--- a/src/java/org/apache/cassandra/hadoop/ConfigHelper.java ++++ b/src/java/org/apache/cassandra/hadoop/ConfigHelper.java +@@ -28,16 +28,9 @@ import org.slf4j.LoggerFactory; + + import org.apache.cassandra.dht.IPartitioner; + import org.apache.cassandra.schema.CompressionParams; +-import org.apache.cassandra.thrift.*; + import org.apache.cassandra.utils.FBUtilities; + import org.apache.cassandra.utils.Hex; + import org.apache.hadoop.conf.Configuration; +-import org.apache.thrift.TBase; +-import org.apache.thrift.TDeserializer; +-import org.apache.thrift.TException; +-import org.apache.thrift.TSerializer; +-import org.apache.thrift.protocol.TBinaryProtocol; +-import org.apache.thrift.transport.TTransport; + + public class ConfigHelper + { +@@ -59,16 +52,13 @@ public class ConfigHelper + private static final int DEFAULT_SPLIT_SIZE = 64 * 1024; + private static final String RANGE_BATCH_SIZE_CONFIG = "cassandra.range.batch.size"; + private static final int DEFAULT_RANGE_BATCH_SIZE = 4096; +- private static final String INPUT_THRIFT_PORT = "cassandra.input.thrift.port"; +- private static final String OUTPUT_THRIFT_PORT = "cassandra.output.thrift.port"; +- private static final String INPUT_INITIAL_THRIFT_ADDRESS = "cassandra.input.thrift.address"; +- 
private static final String OUTPUT_INITIAL_THRIFT_ADDRESS = "cassandra.output.thrift.address"; ++ private static final String INPUT_INITIAL_ADDRESS = "cassandra.input.address"; ++ private static final String OUTPUT_INITIAL_ADDRESS = "cassandra.output.address"; + private static final String READ_CONSISTENCY_LEVEL = "cassandra.consistencylevel.read"; + private static final String WRITE_CONSISTENCY_LEVEL = "cassandra.consistencylevel.write"; + private static final String OUTPUT_COMPRESSION_CLASS = "cassandra.output.compression.class"; + private static final String OUTPUT_COMPRESSION_CHUNK_LENGTH = "cassandra.output.compression.length"; + private static final String OUTPUT_LOCAL_DC_ONLY = "cassandra.output.local.dc.only"; +- private static final String THRIFT_FRAMED_TRANSPORT_SIZE_IN_MB = "cassandra.thrift.framed.size_mb"; + + private static final Logger logger = LoggerFactory.getLogger(ConfigHelper.class); + +@@ -213,104 +203,28 @@ public class ConfigHelper + } + + /** +- * Set the predicate that determines what columns will be selected from each row. +- * +- * @param conf Job configuration you are about to run +- * @param predicate +- */ +- public static void setInputSlicePredicate(Configuration conf, SlicePredicate predicate) +- { +- conf.set(INPUT_PREDICATE_CONFIG, thriftToString(predicate)); +- } +- +- public static SlicePredicate getInputSlicePredicate(Configuration conf) +- { +- String s = conf.get(INPUT_PREDICATE_CONFIG); +- return s == null ? null : predicateFromString(s); +- } +- +- private static String thriftToString(TBase object) +- { +- assert object != null; +- // this is so awful it's kind of cool! 
+- TSerializer serializer = new TSerializer(new TBinaryProtocol.Factory()); +- try +- { +- return Hex.bytesToHex(serializer.serialize(object)); +- } +- catch (TException e) +- { +- throw new RuntimeException(e); +- } +- } +- +- private static SlicePredicate predicateFromString(String st) +- { +- assert st != null; +- TDeserializer deserializer = new TDeserializer(new TBinaryProtocol.Factory()); +- SlicePredicate predicate = new SlicePredicate(); +- try +- { +- deserializer.deserialize(predicate, Hex.hexToBytes(st)); +- } +- catch (TException e) +- { +- throw new RuntimeException(e); +- } +- return predicate; +- } +- +- /** + * Set the KeyRange to limit the rows. + * @param conf Job configuration you are about to run + */ + public static void setInputRange(Configuration conf, String startToken, String endToken) + { +- KeyRange range = new KeyRange().setStart_token(startToken).setEnd_token(endToken); +- conf.set(INPUT_KEYRANGE_CONFIG, thriftToString(range)); ++ conf.set(INPUT_KEYRANGE_CONFIG, startToken + "," + endToken); + } + + /** +- * Set the KeyRange to limit the rows. +- * @param conf Job configuration you are about to run +- */ +- public static void setInputRange(Configuration conf, String startToken, String endToken, List filter) +- { +- KeyRange range = new KeyRange().setStart_token(startToken).setEnd_token(endToken).setRow_filter(filter); +- conf.set(INPUT_KEYRANGE_CONFIG, thriftToString(range)); +- } +- +- /** +- * Set the KeyRange to limit the rows. +- * @param conf Job configuration you are about to run ++ * The start and end token of the input key range as a pair. ++ * ++ * may be null if unset. 
+ */ +- public static void setInputRange(Configuration conf, List filter) +- { +- KeyRange range = new KeyRange().setRow_filter(filter); +- conf.set(INPUT_KEYRANGE_CONFIG, thriftToString(range)); +- } +- +- /** may be null if unset */ +- public static KeyRange getInputKeyRange(Configuration conf) ++ public static Pair getInputKeyRange(Configuration conf) + { + String str = conf.get(INPUT_KEYRANGE_CONFIG); +- return str == null ? null : keyRangeFromString(str); +- } ++ if (str == null) ++ return null; + +- private static KeyRange keyRangeFromString(String st) +- { +- assert st != null; +- TDeserializer deserializer = new TDeserializer(new TBinaryProtocol.Factory()); +- KeyRange keyRange = new KeyRange(); +- try +- { +- deserializer.deserialize(keyRange, Hex.hexToBytes(st)); +- } +- catch (TException e) +- { +- throw new RuntimeException(e); +- } +- return keyRange; ++ String[] parts = str.split(","); ++ assert parts.length == 2; ++ return Pair.create(parts[0], parts[1]); + } + + public static String getInputKeyspace(Configuration conf) +@@ -413,24 +327,14 @@ public class ConfigHelper + conf.set(WRITE_CONSISTENCY_LEVEL, consistencyLevel); + } + +- public static int getInputRpcPort(Configuration conf) +- { +- return Integer.parseInt(conf.get(INPUT_THRIFT_PORT, "9160")); +- } +- +- public static void setInputRpcPort(Configuration conf, String port) +- { +- conf.set(INPUT_THRIFT_PORT, port); +- } +- + public static String getInputInitialAddress(Configuration conf) + { +- return conf.get(INPUT_INITIAL_THRIFT_ADDRESS); ++ return conf.get(INPUT_INITIAL_ADDRESS); + } + + public static void setInputInitialAddress(Configuration conf, String address) + { +- conf.set(INPUT_INITIAL_THRIFT_ADDRESS, address); ++ conf.set(INPUT_INITIAL_ADDRESS, address); + } + + public static void setInputPartitioner(Configuration conf, String classname) +@@ -443,24 +347,14 @@ public class ConfigHelper + return FBUtilities.newPartitioner(conf.get(INPUT_PARTITIONER_CONFIG)); + } + +- public static 
int getOutputRpcPort(Configuration conf) +- { +- return Integer.parseInt(conf.get(OUTPUT_THRIFT_PORT, "9160")); +- } +- +- public static void setOutputRpcPort(Configuration conf, String port) +- { +- conf.set(OUTPUT_THRIFT_PORT, port); +- } +- + public static String getOutputInitialAddress(Configuration conf) + { +- return conf.get(OUTPUT_INITIAL_THRIFT_ADDRESS); ++ return conf.get(OUTPUT_INITIAL_ADDRESS); + } + + public static void setOutputInitialAddress(Configuration conf, String address) + { +- conf.set(OUTPUT_INITIAL_THRIFT_ADDRESS, address); ++ conf.set(OUTPUT_INITIAL_ADDRESS, address); + } + + public static void setOutputPartitioner(Configuration conf, String classname) +@@ -493,20 +387,6 @@ public class ConfigHelper + conf.set(OUTPUT_COMPRESSION_CHUNK_LENGTH, length); + } + +- public static void setThriftFramedTransportSizeInMb(Configuration conf, int frameSizeInMB) +- { +- conf.setInt(THRIFT_FRAMED_TRANSPORT_SIZE_IN_MB, frameSizeInMB); +- } +- +- /** +- * @param conf The configuration to use. 
+- * @return Value (converts MBs to Bytes) set by {@link #setThriftFramedTransportSizeInMb(Configuration, int)} or default of 15MB +- */ +- public static int getThriftFramedTransportSize(Configuration conf) +- { +- return conf.getInt(THRIFT_FRAMED_TRANSPORT_SIZE_IN_MB, 15) * 1024 * 1024; // 15MB is default in Cassandra +- } +- + public static boolean getOutputLocalDCOnly(Configuration conf) + { + return Boolean.parseBoolean(conf.get(OUTPUT_LOCAL_DC_ONLY, "false")); +@@ -517,79 +397,6 @@ public class ConfigHelper + conf.set(OUTPUT_LOCAL_DC_ONLY, Boolean.toString(localDCOnly)); + } + +- public static Cassandra.Client getClientFromInputAddressList(Configuration conf) throws IOException +- { +- return getClientFromAddressList(conf, ConfigHelper.getInputInitialAddress(conf).split(","), ConfigHelper.getInputRpcPort(conf)); +- } +- +- public static Cassandra.Client getClientFromOutputAddressList(Configuration conf) throws IOException +- { +- return getClientFromAddressList(conf, ConfigHelper.getOutputInitialAddress(conf).split(","), ConfigHelper.getOutputRpcPort(conf)); +- } +- +- private static Cassandra.Client getClientFromAddressList(Configuration conf, String[] addresses, int port) throws IOException +- { +- Cassandra.Client client = null; +- List exceptions = new ArrayList(); +- for (String address : addresses) +- { +- try +- { +- client = createConnection(conf, address, port); +- break; +- } +- catch (IOException ioe) +- { +- exceptions.add(ioe); +- } +- } +- if (client == null) +- { +- logger.error("failed to connect to any initial addresses"); +- for (IOException ioe : exceptions) +- { +- logger.error("", ioe); +- } +- throw exceptions.get(exceptions.size() - 1); +- } +- return client; +- } +- +- @SuppressWarnings("resource") +- public static Cassandra.Client createConnection(Configuration conf, String host, Integer port) throws IOException +- { +- try +- { +- TTransport transport = getClientTransportFactory(conf).openTransport(host, port); +- return new 
Cassandra.Client(new TBinaryProtocol(transport, true, true)); +- } +- catch (Exception e) +- { +- throw new IOException("Unable to connect to server " + host + ":" + port, e); +- } +- } +- +- public static ITransportFactory getClientTransportFactory(Configuration conf) +- { +- String factoryClassName = conf.get(ITransportFactory.PROPERTY_KEY, TFramedTransportFactory.class.getName()); +- ITransportFactory factory = getClientTransportFactory(factoryClassName); +- Map options = getOptions(conf, factory.supportedOptions()); +- factory.setOptions(options); +- return factory; +- } +- +- private static ITransportFactory getClientTransportFactory(String factoryClassName) +- { +- try +- { +- return (ITransportFactory) Class.forName(factoryClassName).newInstance(); +- } +- catch (Exception e) +- { +- throw new RuntimeException("Failed to instantiate transport factory:" + factoryClassName, e); +- } +- } +- + private static Map getOptions(Configuration conf, Set supportedOptions) + { + Map options = new HashMap<>(); +diff --git a/src/java/org/apache/cassandra/hadoop/cql3/CqlInputFormat.java b/src/java/org/apache/cassandra/hadoop/cql3/CqlInputFormat.java +index daba701..390c78c 100644 +--- a/src/java/org/apache/cassandra/hadoop/cql3/CqlInputFormat.java ++++ b/src/java/org/apache/cassandra/hadoop/cql3/CqlInputFormat.java +@@ -41,8 +41,8 @@ import org.slf4j.Logger; + import org.slf4j.LoggerFactory; + import org.apache.cassandra.db.SystemKeyspace; + import org.apache.cassandra.dht.*; +-import org.apache.cassandra.thrift.KeyRange; + import org.apache.cassandra.hadoop.*; ++import org.apache.cassandra.utils.*; + + import static java.util.stream.Collectors.toMap; + +@@ -132,30 +132,12 @@ public class CqlInputFormat extends org.apache.hadoop.mapreduce.InputFormat>> splitfutures = new ArrayList<>(); +- KeyRange jobKeyRange = ConfigHelper.getInputKeyRange(conf); ++ Pair jobKeyRange = ConfigHelper.getInputKeyRange(conf); + Range jobRange = null; + if (jobKeyRange != null) + { +- if 
(jobKeyRange.start_key != null) +- { +- if (!partitioner.preservesOrder()) +- throw new UnsupportedOperationException("KeyRange based on keys can only be used with a order preserving partitioner"); +- if (jobKeyRange.start_token != null) +- throw new IllegalArgumentException("only start_key supported"); +- if (jobKeyRange.end_token != null) +- throw new IllegalArgumentException("only start_key supported"); +- jobRange = new Range<>(partitioner.getToken(jobKeyRange.start_key), +- partitioner.getToken(jobKeyRange.end_key)); +- } +- else if (jobKeyRange.start_token != null) +- { +- jobRange = new Range<>(partitioner.getTokenFactory().fromString(jobKeyRange.start_token), +- partitioner.getTokenFactory().fromString(jobKeyRange.end_token)); +- } +- else +- { +- logger.warn("ignoring jobKeyRange specified without start_key or start_token"); +- } ++ jobRange = new Range<>(partitioner.getTokenFactory().fromString(jobKeyRange.left), ++ partitioner.getTokenFactory().fromString(jobKeyRange.right)); + } + + Metadata metadata = cluster.getMetadata(); +diff --git a/src/java/org/apache/cassandra/index/internal/CassandraIndexFunctions.java b/src/java/org/apache/cassandra/index/internal/CassandraIndexFunctions.java +index 89eebdf..8047e1d 100644 +--- a/src/java/org/apache/cassandra/index/internal/CassandraIndexFunctions.java ++++ b/src/java/org/apache/cassandra/index/internal/CassandraIndexFunctions.java +@@ -59,7 +59,7 @@ public interface CassandraIndexFunctions + * *other* clustering values in the index - the indexed value being the index table's partition key + * * When the indexed value is a collection value, in which case we also need to capture the cell path from the base + * table +- * * In a KEYS index (for thrift/compact storage/static column indexes), where only the base partition key is ++ * * In a KEYS index (for compact storage/static column indexes), where only the base partition key is + * held in the index table. 
+ * + * Called from indexCfsMetadata +diff --git a/src/java/org/apache/cassandra/index/internal/IndexEntry.java b/src/java/org/apache/cassandra/index/internal/IndexEntry.java +index 8ffd26a..2c128ce 100644 +--- a/src/java/org/apache/cassandra/index/internal/IndexEntry.java ++++ b/src/java/org/apache/cassandra/index/internal/IndexEntry.java +@@ -29,7 +29,7 @@ import org.apache.cassandra.db.DecoratedKey; + /** + * Entries in indexes on non-compact tables (tables with composite comparators) + * can be encapsulated as IndexedEntry instances. These are not used when dealing +- * with indexes on static/compact/thrift tables (i.e. KEYS indexes). ++ * with indexes on static/compact tables (i.e. KEYS indexes). + */ + public final class IndexEntry + { +diff --git a/src/java/org/apache/cassandra/index/internal/composites/CompositesSearcher.java b/src/java/org/apache/cassandra/index/internal/composites/CompositesSearcher.java +index e24e441..5ac2af8 100644 +--- a/src/java/org/apache/cassandra/index/internal/composites/CompositesSearcher.java ++++ b/src/java/org/apache/cassandra/index/internal/composites/CompositesSearcher.java +@@ -70,11 +70,6 @@ public class CompositesSearcher extends CassandraIndexSearcher + + private UnfilteredRowIterator next; + +- public boolean isForThrift() +- { +- return command.isForThrift(); +- } +- + public CFMetaData metadata() + { + return command.metadata(); +diff --git a/src/java/org/apache/cassandra/index/internal/keys/KeysSearcher.java b/src/java/org/apache/cassandra/index/internal/keys/KeysSearcher.java +index 0169d3f..febb09f 100644 +--- a/src/java/org/apache/cassandra/index/internal/keys/KeysSearcher.java ++++ b/src/java/org/apache/cassandra/index/internal/keys/KeysSearcher.java +@@ -23,12 +23,10 @@ import org.slf4j.Logger; + import org.slf4j.LoggerFactory; + + import org.apache.cassandra.config.CFMetaData; +-import org.apache.cassandra.config.ColumnDefinition; + import org.apache.cassandra.db.*; + import 
org.apache.cassandra.db.filter.ColumnFilter; + import org.apache.cassandra.db.filter.DataLimits; + import org.apache.cassandra.db.filter.RowFilter; +-import org.apache.cassandra.db.partitions.ImmutableBTreePartition; + import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator; + import org.apache.cassandra.db.rows.*; + import org.apache.cassandra.index.internal.CassandraIndex; +@@ -57,11 +55,6 @@ public class KeysSearcher extends CassandraIndexSearcher + { + private UnfilteredRowIterator next; + +- public boolean isForThrift() +- { +- return command.isForThrift(); +- } +- + public CFMetaData metadata() + { + return command.metadata(); +@@ -92,8 +85,7 @@ public class KeysSearcher extends CassandraIndexSearcher + continue; + + ColumnFilter extendedFilter = getExtendedFilter(command.columnFilter()); +- SinglePartitionReadCommand dataCmd = SinglePartitionReadCommand.create(isForThrift(), +- index.baseCfs.metadata, ++ SinglePartitionReadCommand dataCmd = SinglePartitionReadCommand.create(index.baseCfs.metadata, + command.nowInSec(), + extendedFilter, + command.rowFilter(), +@@ -108,7 +100,6 @@ public class KeysSearcher extends CassandraIndexSearcher + hit, + indexKey.getKey(), + executionController.writeOpOrderGroup(), +- isForThrift(), + command.nowInSec()); + + if (dataIter != null) +@@ -151,66 +142,23 @@ public class KeysSearcher extends CassandraIndexSearcher + Row indexHit, + ByteBuffer indexedValue, + OpOrder.Group writeOp, +- boolean isForThrift, + int nowInSec) + { +- if (isForThrift) ++ assert iterator.metadata().isCompactTable(); ++ Row data = iterator.staticRow(); ++ if (index.isStale(data, indexedValue, nowInSec)) + { +- // The data we got has gone though ThrifResultsMerger, so we're looking for the row whose clustering +- // is the indexed name and so we need to materialize the partition. 
+- ImmutableBTreePartition result = ImmutableBTreePartition.create(iterator); ++ // Index is stale, remove the index entry and ignore ++ index.deleteStaleEntry(index.getIndexCfs().decorateKey(indexedValue), ++ makeIndexClustering(iterator.partitionKey().getKey(), Clustering.EMPTY), ++ new DeletionTime(indexHit.primaryKeyLivenessInfo().timestamp(), nowInSec), ++ writeOp); + iterator.close(); +- Row data = result.getRow(Clustering.make(index.getIndexedColumn().name.bytes)); +- if (data == null) +- return null; +- +- // for thrift tables, we need to compare the index entry against the compact value column, +- // not the column actually designated as the indexed column so we don't use the index function +- // lib for the staleness check like we do in every other case +- Cell baseData = data.getCell(index.baseCfs.metadata.compactValueColumn()); +- if (baseData == null || !baseData.isLive(nowInSec) || index.getIndexedColumn().type.compare(indexedValue, baseData.value()) != 0) +- { +- // Index is stale, remove the index entry and ignore +- index.deleteStaleEntry(index.getIndexCfs().decorateKey(indexedValue), +- Clustering.make(index.getIndexedColumn().name.bytes), +- new DeletionTime(indexHit.primaryKeyLivenessInfo().timestamp(), nowInSec), +- writeOp); +- return null; +- } +- else +- { +- if (command.columnFilter().fetches(index.getIndexedColumn())) +- return result.unfilteredIterator(); +- +- // The query on the base table used an extended column filter to ensure that the +- // indexed column was actually read for use in the staleness check, before +- // returning the results we must filter the base table partition so that it +- // contains only the originally requested columns. 
See CASSANDRA-11523 +- ClusteringComparator comparator = result.metadata().comparator; +- Slices.Builder slices = new Slices.Builder(comparator); +- for (ColumnDefinition selected : command.columnFilter().fetchedColumns()) +- slices.add(Slice.make(comparator, selected.name.bytes)); +- return result.unfilteredIterator(ColumnFilter.all(command.metadata()), slices.build(), false); +- } ++ return null; + } + else + { +- assert iterator.metadata().isCompactTable(); +- Row data = iterator.staticRow(); +- if (index.isStale(data, indexedValue, nowInSec)) +- { +- // Index is stale, remove the index entry and ignore +- index.deleteStaleEntry(index.getIndexCfs().decorateKey(indexedValue), +- makeIndexClustering(iterator.partitionKey().getKey(), Clustering.EMPTY), +- new DeletionTime(indexHit.primaryKeyLivenessInfo().timestamp(), nowInSec), +- writeOp); +- iterator.close(); +- return null; +- } +- else +- { +- return iterator; +- } ++ return iterator; + } + } + } +diff --git a/src/java/org/apache/cassandra/index/sasi/plan/QueryController.java b/src/java/org/apache/cassandra/index/sasi/plan/QueryController.java +index c8ae0d8..0d4ee48 100644 +--- a/src/java/org/apache/cassandra/index/sasi/plan/QueryController.java ++++ b/src/java/org/apache/cassandra/index/sasi/plan/QueryController.java +@@ -63,11 +63,6 @@ public class QueryController + this.executionStart = System.nanoTime(); + } + +- public boolean isForThrift() +- { +- return command.isForThrift(); +- } +- + public CFMetaData metadata() + { + return command.metadata(); +@@ -101,8 +96,7 @@ public class QueryController + throw new NullPointerException(); + try + { +- SinglePartitionReadCommand partition = SinglePartitionReadCommand.create(command.isForThrift(), +- cfs.metadata, ++ SinglePartitionReadCommand partition = SinglePartitionReadCommand.create(cfs.metadata, + command.nowInSec(), + command.columnFilter(), + command.rowFilter().withoutExpressions(), +diff --git 
a/src/java/org/apache/cassandra/index/sasi/plan/QueryPlan.java b/src/java/org/apache/cassandra/index/sasi/plan/QueryPlan.java +index 4410756..8a25f79 100644 +--- a/src/java/org/apache/cassandra/index/sasi/plan/QueryPlan.java ++++ b/src/java/org/apache/cassandra/index/sasi/plan/QueryPlan.java +@@ -153,11 +153,6 @@ public class QueryPlan + } + } + +- public boolean isForThrift() +- { +- return controller.isForThrift(); +- } +- + public CFMetaData metadata() + { + return controller.metadata(); +diff --git a/src/java/org/apache/cassandra/io/sstable/IndexInfo.java b/src/java/org/apache/cassandra/io/sstable/IndexInfo.java +index b07ce4a..4daf196 100644 +--- a/src/java/org/apache/cassandra/io/sstable/IndexInfo.java ++++ b/src/java/org/apache/cassandra/io/sstable/IndexInfo.java +@@ -19,11 +19,14 @@ + package org.apache.cassandra.io.sstable; + + import java.io.IOException; ++import java.util.List; + + import org.apache.cassandra.db.ClusteringPrefix; + import org.apache.cassandra.db.DeletionTime; + import org.apache.cassandra.db.RowIndexEntry; ++import org.apache.cassandra.db.SerializationHeader; + import org.apache.cassandra.db.TypeSizes; ++import org.apache.cassandra.db.marshal.AbstractType; + import org.apache.cassandra.io.ISerializer; + import org.apache.cassandra.io.sstable.format.Version; + import org.apache.cassandra.io.util.DataInputPlus; +@@ -81,6 +84,11 @@ public class IndexInfo + this.endOpenMarker = endOpenMarker; + } + ++ public static IndexInfo.Serializer serializer(Version version, SerializationHeader header) ++ { ++ return new IndexInfo.Serializer(version, header.clusteringTypes()); ++ } ++ + public static class Serializer implements ISerializer + { + // This is the default index size that we use to delta-encode width when serializing so we get better vint-encoding. +@@ -89,21 +97,19 @@ public class IndexInfo + // size so using the default is almost surely better than using no base at all. 
+ public static final long WIDTH_BASE = 64 * 1024; + +- private final ISerializer clusteringSerializer; +- private final Version version; ++ private final int version; ++ private final List> clusteringTypes; + +- public Serializer(Version version, ISerializer clusteringSerializer) ++ public Serializer(Version version, List> clusteringTypes) + { +- this.clusteringSerializer = clusteringSerializer; +- this.version = version; ++ this.version = version.correspondingMessagingVersion(); ++ this.clusteringTypes = clusteringTypes; + } + + public void serialize(IndexInfo info, DataOutputPlus out) throws IOException + { +- assert version.storeRows() : "We read old index files but we should never write them"; +- +- clusteringSerializer.serialize(info.firstName, out); +- clusteringSerializer.serialize(info.lastName, out); ++ ClusteringPrefix.serializer.serialize(info.firstName, out, version, clusteringTypes); ++ ClusteringPrefix.serializer.serialize(info.lastName, out, version, clusteringTypes); + out.writeUnsignedVInt(info.offset); + out.writeVInt(info.width - WIDTH_BASE); + +@@ -114,53 +120,33 @@ public class IndexInfo + + public void skip(DataInputPlus in) throws IOException + { +- clusteringSerializer.skip(in); +- clusteringSerializer.skip(in); +- if (version.storeRows()) +- { +- in.readUnsignedVInt(); +- in.readVInt(); +- if (in.readBoolean()) +- DeletionTime.serializer.skip(in); +- } +- else +- { +- in.skipBytes(TypeSizes.sizeof(0L)); +- in.skipBytes(TypeSizes.sizeof(0L)); +- } ++ ClusteringPrefix.serializer.skip(in, version, clusteringTypes); ++ ClusteringPrefix.serializer.skip(in, version, clusteringTypes); ++ in.readUnsignedVInt(); ++ in.readVInt(); ++ if (in.readBoolean()) ++ DeletionTime.serializer.skip(in); + } + + public IndexInfo deserialize(DataInputPlus in) throws IOException + { +- ClusteringPrefix firstName = clusteringSerializer.deserialize(in); +- ClusteringPrefix lastName = clusteringSerializer.deserialize(in); +- long offset; +- long width; ++ 
ClusteringPrefix firstName = ClusteringPrefix.serializer.deserialize(in, version, clusteringTypes); ++ ClusteringPrefix lastName = ClusteringPrefix.serializer.deserialize(in, version, clusteringTypes); ++ long offset = in.readUnsignedVInt(); ++ long width = in.readVInt() + WIDTH_BASE; + DeletionTime endOpenMarker = null; +- if (version.storeRows()) +- { +- offset = in.readUnsignedVInt(); +- width = in.readVInt() + WIDTH_BASE; +- if (in.readBoolean()) +- endOpenMarker = DeletionTime.serializer.deserialize(in); +- } +- else +- { +- offset = in.readLong(); +- width = in.readLong(); +- } ++ if (in.readBoolean()) ++ endOpenMarker = DeletionTime.serializer.deserialize(in); + return new IndexInfo(firstName, lastName, offset, width, endOpenMarker); + } + + public long serializedSize(IndexInfo info) + { +- assert version.storeRows() : "We read old index files but we should never write them"; +- +- long size = clusteringSerializer.serializedSize(info.firstName) +- + clusteringSerializer.serializedSize(info.lastName) +- + TypeSizes.sizeofUnsignedVInt(info.offset) +- + TypeSizes.sizeofVInt(info.width - WIDTH_BASE) +- + TypeSizes.sizeof(info.endOpenMarker != null); ++ long size = ClusteringPrefix.serializer.serializedSize(info.firstName, version, clusteringTypes) ++ + ClusteringPrefix.serializer.serializedSize(info.lastName, version, clusteringTypes) ++ + TypeSizes.sizeofUnsignedVInt(info.offset) ++ + TypeSizes.sizeofVInt(info.width - WIDTH_BASE) ++ + TypeSizes.sizeof(info.endOpenMarker != null); + + if (info.endOpenMarker != null) + size += DeletionTime.serializer.serializedSize(info.endOpenMarker); +diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableSimpleIterator.java b/src/java/org/apache/cassandra/io/sstable/SSTableSimpleIterator.java +index eb69271..5a12f35 100644 +--- a/src/java/org/apache/cassandra/io/sstable/SSTableSimpleIterator.java ++++ b/src/java/org/apache/cassandra/io/sstable/SSTableSimpleIterator.java +@@ -54,10 +54,7 @@ public abstract class 
SSTableSimpleIterator extends AbstractIterator + + public static SSTableSimpleIterator create(CFMetaData metadata, DataInputPlus in, SerializationHeader header, SerializationHelper helper, DeletionTime partitionDeletion) + { +- if (helper.version < MessagingService.VERSION_30) +- return new OldFormatIterator(metadata, in, helper, partitionDeletion); +- else +- return new CurrentFormatIterator(metadata, in, header, helper); ++ return new CurrentFormatIterator(metadata, in, header, helper); + } + + public abstract Row readStaticRow() throws IOException; +@@ -93,75 +90,4 @@ public abstract class SSTableSimpleIterator extends AbstractIterator + } + } + } +- +- private static class OldFormatIterator extends SSTableSimpleIterator +- { +- private final UnfilteredDeserializer deserializer; +- +- private OldFormatIterator(CFMetaData metadata, DataInputPlus in, SerializationHelper helper, DeletionTime partitionDeletion) +- { +- super(metadata, in, helper); +- // We use an UnfilteredDeserializer because even though we don't need all it's fanciness, it happens to handle all +- // the details we need for reading the old format. +- this.deserializer = UnfilteredDeserializer.create(metadata, in, null, helper, partitionDeletion, false); +- } +- +- public Row readStaticRow() throws IOException +- { +- if (metadata.isCompactTable()) +- { +- // For static compact tables, in the old format, static columns are intermingled with the other columns, so we +- // need to extract them. Which imply 2 passes (one to extract the static, then one for other value). 
+- if (metadata.isStaticCompactTable()) +- { +- assert in instanceof RewindableDataInput; +- RewindableDataInput file = (RewindableDataInput)in; +- DataPosition mark = file.mark(); +- Row staticRow = LegacyLayout.extractStaticColumns(metadata, file, metadata.partitionColumns().statics); +- file.reset(mark); +- +- // We've extracted the static columns, so we must ignore them on the 2nd pass +- ((UnfilteredDeserializer.OldFormatDeserializer)deserializer).setSkipStatic(); +- return staticRow; +- } +- else +- { +- return Rows.EMPTY_STATIC_ROW; +- } +- } +- +- return deserializer.hasNext() && deserializer.nextIsStatic() +- ? (Row)deserializer.readNext() +- : Rows.EMPTY_STATIC_ROW; +- +- } +- +- protected Unfiltered computeNext() +- { +- while (true) +- { +- try +- { +- if (!deserializer.hasNext()) +- return endOfData(); +- +- Unfiltered unfiltered = deserializer.readNext(); +- if (metadata.isStaticCompactTable() && unfiltered.kind() == Unfiltered.Kind.ROW) +- { +- Row row = (Row) unfiltered; +- ColumnDefinition def = metadata.getColumnDefinition(LegacyLayout.encodeClustering(metadata, row.clustering())); +- if (def != null && def.isStatic()) +- continue; +- } +- return unfiltered; +- } +- catch (IOException e) +- { +- throw new IOError(e); +- } +- } +- } +- +- } +- + } +diff --git a/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java b/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java +index d11e057..646f371 100644 +--- a/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java ++++ b/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java +@@ -1521,8 +1521,8 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted iterator; + + // Full scan of the sstables + public static ISSTableScanner getScanner(SSTableReader sstable, RateLimiter limiter) + { +- return new BigTableScanner(sstable, ColumnFilter.all(sstable.metadata), null, limiter, false, Iterators.singletonIterator(fullRange(sstable))); ++ 
return new BigTableScanner(sstable, ColumnFilter.all(sstable.metadata), null, limiter, Iterators.singletonIterator(fullRange(sstable))); + } + +- public static ISSTableScanner getScanner(SSTableReader sstable, ColumnFilter columns, DataRange dataRange, RateLimiter limiter, boolean isForThrift) ++ public static ISSTableScanner getScanner(SSTableReader sstable, ColumnFilter columns, DataRange dataRange, RateLimiter limiter) + { +- return new BigTableScanner(sstable, columns, dataRange, limiter, isForThrift, makeBounds(sstable, dataRange).iterator()); ++ return new BigTableScanner(sstable, columns, dataRange, limiter, makeBounds(sstable, dataRange).iterator()); + } + + public static ISSTableScanner getScanner(SSTableReader sstable, Collection> tokenRanges, RateLimiter limiter) +@@ -83,15 +82,15 @@ public class BigTableScanner implements ISSTableScanner + if (positions.isEmpty()) + return new EmptySSTableScanner(sstable); + +- return new BigTableScanner(sstable, ColumnFilter.all(sstable.metadata), null, limiter, false, makeBounds(sstable, tokenRanges).iterator()); ++ return new BigTableScanner(sstable, ColumnFilter.all(sstable.metadata), null, limiter, makeBounds(sstable, tokenRanges).iterator()); + } + + public static ISSTableScanner getScanner(SSTableReader sstable, Iterator> rangeIterator) + { +- return new BigTableScanner(sstable, ColumnFilter.all(sstable.metadata), null, null, false, rangeIterator); ++ return new BigTableScanner(sstable, ColumnFilter.all(sstable.metadata), null, null, rangeIterator); + } + +- private BigTableScanner(SSTableReader sstable, ColumnFilter columns, DataRange dataRange, RateLimiter limiter, boolean isForThrift, Iterator> rangeIterator) ++ private BigTableScanner(SSTableReader sstable, ColumnFilter columns, DataRange dataRange, RateLimiter limiter, Iterator> rangeIterator) + { + assert sstable != null; + +@@ -103,7 +102,6 @@ public class BigTableScanner implements ISSTableScanner + this.rowIndexEntrySerializer = 
sstable.descriptor.version.getSSTableFormat().getIndexSerializer(sstable.metadata, + sstable.descriptor.version, + sstable.header); +- this.isForThrift = isForThrift; + this.rangeIterator = rangeIterator; + } + +@@ -181,7 +179,7 @@ public class BigTableScanner implements ISSTableScanner + if (indexDecoratedKey.compareTo(currentRange.left) > 0 || currentRange.contains(indexDecoratedKey)) + { + // Found, just read the dataPosition and seek into index and data files +- long dataPosition = RowIndexEntry.Serializer.readPosition(ifile, sstable.descriptor.version); ++ long dataPosition = RowIndexEntry.Serializer.readPosition(ifile); + ifile.seek(indexPosition); + dfile.seek(dataPosition); + break; +@@ -228,11 +226,6 @@ public class BigTableScanner implements ISSTableScanner + return sstable.toString(); + } + +- public boolean isForThrift() +- { +- return isForThrift; +- } +- + public CFMetaData metadata() + { + return sstable.metadata; +@@ -335,7 +328,7 @@ public class BigTableScanner implements ISSTableScanner + } + + ClusteringIndexFilter filter = dataRange.clusteringIndexFilter(partitionKey()); +- return sstable.iterator(dfile, partitionKey(), currentEntry, filter.getSlices(BigTableScanner.this.metadata()), columns, filter.isReversed(), isForThrift); ++ return sstable.iterator(dfile, partitionKey(), currentEntry, filter.getSlices(BigTableScanner.this.metadata()), columns, filter.isReversed()); + } + catch (CorruptSSTableException | IOException e) + { +@@ -387,11 +380,6 @@ public class BigTableScanner implements ISSTableScanner + return sstable.getFilename(); + } + +- public boolean isForThrift() +- { +- return false; +- } +- + public CFMetaData metadata() + { + return sstable.metadata; +diff --git a/src/java/org/apache/cassandra/scheduler/IRequestScheduler.java b/src/java/org/apache/cassandra/scheduler/IRequestScheduler.java +deleted file mode 100644 +index 798f09e..0000000 +--- a/src/java/org/apache/cassandra/scheduler/IRequestScheduler.java ++++ /dev/null +@@ -1,41 
+0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. +- */ +-package org.apache.cassandra.scheduler; +- +-import java.util.concurrent.TimeoutException; +- +-/** +- * Implementors of IRequestScheduler must provide a constructor taking a RequestSchedulerOptions object. 
+- */ +-public interface IRequestScheduler +-{ +- /** +- * Queue incoming request threads +- * +- * @param t Thread handing the request +- * @param id Scheduling parameter, an id to distinguish profiles (users/keyspace) +- * @param timeoutMS The max time in milliseconds to spend blocking for a slot +- */ +- public void queue(Thread t, String id, long timeoutMS) throws TimeoutException; +- +- /** +- * A convenience method for indicating when a particular request has completed +- * processing, and before a return to the client +- */ +- public void release(); +-} +diff --git a/src/java/org/apache/cassandra/scheduler/NoScheduler.java b/src/java/org/apache/cassandra/scheduler/NoScheduler.java +deleted file mode 100644 +index d3f4ce8..0000000 +--- a/src/java/org/apache/cassandra/scheduler/NoScheduler.java ++++ /dev/null +@@ -1,37 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. +- */ +-package org.apache.cassandra.scheduler; +- +-import org.apache.cassandra.config.RequestSchedulerOptions; +- +- +-/** +- * This is basically not having a scheduler, the requests are +- * processed as normally would be handled by the JVM. 
+- */ +-public class NoScheduler implements IRequestScheduler +-{ +- +- public NoScheduler(RequestSchedulerOptions options) {} +- +- public NoScheduler() {} +- +- public void queue(Thread t, String id, long timeoutMS) {} +- +- public void release() {} +-} +diff --git a/src/java/org/apache/cassandra/scheduler/RoundRobinScheduler.java b/src/java/org/apache/cassandra/scheduler/RoundRobinScheduler.java +deleted file mode 100644 +index c98c0fe..0000000 +--- a/src/java/org/apache/cassandra/scheduler/RoundRobinScheduler.java ++++ /dev/null +@@ -1,161 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. +- */ +-package org.apache.cassandra.scheduler; +- +- +-import java.util.Map; +-import java.util.concurrent.Semaphore; +-import java.util.concurrent.TimeoutException; +- +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-import org.apache.cassandra.config.RequestSchedulerOptions; +-import org.cliffc.high_scale_lib.NonBlockingHashMap; +- +-/** +- * A very basic Round Robin implementation of the RequestScheduler. It handles +- * request groups identified on user/keyspace by placing them in separate +- * queues and servicing a request from each queue in a RoundRobin fashion. 
+- * It optionally adds weights for each round. +- */ +-public class RoundRobinScheduler implements IRequestScheduler +-{ +- private static final Logger logger = LoggerFactory.getLogger(RoundRobinScheduler.class); +- +- //Map of queue id to weighted queue +- private final NonBlockingHashMap queues; +- +- private final Semaphore taskCount; +- +- // Tracks the count of threads available in all queues +- // Used by the the scheduler thread so we don't need to busy-wait until there is a request to process +- private final Semaphore queueSize = new Semaphore(0, false); +- +- private final int defaultWeight; +- private final Map weights; +- +- public RoundRobinScheduler(RequestSchedulerOptions options) +- { +- defaultWeight = options.default_weight; +- weights = options.weights; +- +- // the task count is acquired for the first time _after_ releasing a thread, so we pre-decrement +- taskCount = new Semaphore(options.throttle_limit - 1); +- +- queues = new NonBlockingHashMap(); +- Runnable runnable = new Runnable() +- { +- public void run() +- { +- while (true) +- { +- schedule(); +- } +- } +- }; +- Thread scheduler = new Thread(runnable, "REQUEST-SCHEDULER"); +- scheduler.start(); +- logger.info("Started the RoundRobin Request Scheduler"); +- } +- +- public void queue(Thread t, String id, long timeoutMS) throws TimeoutException +- { +- WeightedQueue weightedQueue = getWeightedQueue(id); +- +- try +- { +- queueSize.release(); +- try +- { +- weightedQueue.put(t, timeoutMS); +- // the scheduler will release us when a slot is available +- } +- catch (TimeoutException | InterruptedException e) +- { +- queueSize.acquireUninterruptibly(); +- throw e; +- } +- } +- catch (InterruptedException e) +- { +- throw new RuntimeException("Interrupted while queueing requests", e); +- } +- } +- +- public void release() +- { +- taskCount.release(); +- } +- +- private void schedule() +- { +- queueSize.acquireUninterruptibly(); +- for (Map.Entry request : queues.entrySet()) +- { +- 
WeightedQueue queue = request.getValue(); +- //Using the weight, process that many requests at a time (for that scheduler id) +- for (int i=0; i queue; +- public WeightedQueue(String key, int weight) +- { +- this.key = key; +- this.weight = weight; +- this.queue = new SynchronousQueue(true); +- this.metric = new LatencyMetrics("scheduler", "WeightedQueue", key); +- } +- +- public void put(Thread t, long timeoutMS) throws InterruptedException, TimeoutException +- { +- if (!queue.offer(new WeightedQueue.Entry(t), timeoutMS, TimeUnit.MILLISECONDS)) +- throw new TimeoutException("Failed to acquire request scheduler slot for '" + key + "'"); +- } +- +- public Thread poll() +- { +- Entry e = queue.poll(); +- if (e == null) +- return null; +- metric.addNano(System.nanoTime() - e.creationTime); +- return e.thread; +- } +- +- @Override +- public String toString() +- { +- return "RoundRobinScheduler.WeightedQueue(key=" + key + " weight=" + weight + ")"; +- } +- +- private final static class Entry +- { +- public final long creationTime = System.nanoTime(); +- public final Thread thread; +- public Entry(Thread thread) +- { +- this.thread = thread; +- } +- } +-} +diff --git a/src/java/org/apache/cassandra/schema/LegacySchemaMigrator.java b/src/java/org/apache/cassandra/schema/LegacySchemaMigrator.java +deleted file mode 100644 +index 7d1d228..0000000 +--- a/src/java/org/apache/cassandra/schema/LegacySchemaMigrator.java ++++ /dev/null +@@ -1,1101 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. 
You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. +- */ +-package org.apache.cassandra.schema; +- +-import java.nio.ByteBuffer; +-import java.util.*; +-import java.util.stream.Collectors; +- +-import com.google.common.collect.HashMultimap; +-import com.google.common.collect.ImmutableList; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-import org.apache.cassandra.config.*; +-import org.apache.cassandra.cql3.ColumnIdentifier; +-import org.apache.cassandra.cql3.FieldIdentifier; +-import org.apache.cassandra.cql3.QueryProcessor; +-import org.apache.cassandra.cql3.UntypedResultSet; +-import org.apache.cassandra.cql3.functions.FunctionName; +-import org.apache.cassandra.cql3.functions.UDAggregate; +-import org.apache.cassandra.cql3.functions.UDFunction; +-import org.apache.cassandra.db.*; +-import org.apache.cassandra.db.compaction.AbstractCompactionStrategy; +-import org.apache.cassandra.db.marshal.*; +-import org.apache.cassandra.db.rows.RowIterator; +-import org.apache.cassandra.db.rows.UnfilteredRowIterators; +-import org.apache.cassandra.exceptions.InvalidRequestException; +-import org.apache.cassandra.utils.ByteBufferUtil; +-import org.apache.cassandra.utils.FBUtilities; +-import org.apache.cassandra.utils.concurrent.OpOrder; +- +-import static java.lang.String.format; +-import static org.apache.cassandra.utils.ByteBufferUtil.bytes; +-import static org.apache.cassandra.utils.FBUtilities.fromJsonMap; +- +-/** +- * This majestic class performs migration from legacy (pre-3.0) system.schema_* schema tables to the new and glorious +- * system_schema keyspace. 
+- * +- * The goal is to not lose any information in the migration - including the timestamps. +- */ +-@SuppressWarnings("deprecation") +-public final class LegacySchemaMigrator +-{ +- private LegacySchemaMigrator() +- { +- } +- +- private static final Logger logger = LoggerFactory.getLogger(LegacySchemaMigrator.class); +- +- static final List LegacySchemaTables = +- ImmutableList.of(SystemKeyspace.LegacyKeyspaces, +- SystemKeyspace.LegacyColumnfamilies, +- SystemKeyspace.LegacyColumns, +- SystemKeyspace.LegacyTriggers, +- SystemKeyspace.LegacyUsertypes, +- SystemKeyspace.LegacyFunctions, +- SystemKeyspace.LegacyAggregates); +- +- public static void migrate() +- { +- // read metadata from the legacy schema tables +- Collection keyspaces = readSchema(); +- +- // if already upgraded, or starting a new 3.0 node, abort early +- if (keyspaces.isEmpty()) +- { +- unloadLegacySchemaTables(); +- return; +- } +- +- // write metadata to the new schema tables +- logger.info("Moving {} keyspaces from legacy schema tables to the new schema keyspace ({})", +- keyspaces.size(), +- SchemaKeyspace.NAME); +- keyspaces.forEach(LegacySchemaMigrator::storeKeyspaceInNewSchemaTables); +- keyspaces.forEach(LegacySchemaMigrator::migrateBuiltIndexesForKeyspace); +- +- // flush the new tables before truncating the old ones +- SchemaKeyspace.flush(); +- +- // truncate the original tables (will be snapshotted now, and will have been snapshotted by pre-flight checks) +- logger.info("Truncating legacy schema tables"); +- truncateLegacySchemaTables(); +- +- // remove legacy schema tables from Schema, so that their presence doesn't give the users any wrong ideas +- unloadLegacySchemaTables(); +- +- logger.info("Completed migration of legacy schema tables"); +- } +- +- private static void migrateBuiltIndexesForKeyspace(Keyspace keyspace) +- { +- keyspace.tables.forEach(LegacySchemaMigrator::migrateBuiltIndexesForTable); +- } +- +- private static void migrateBuiltIndexesForTable(Table table) +- { +- 
table.metadata.getIndexes().forEach((index) -> migrateIndexBuildStatus(table.metadata.ksName, +- table.metadata.cfName, +- index)); +- } +- +- private static void migrateIndexBuildStatus(String keyspace, String table, IndexMetadata index) +- { +- if (SystemKeyspace.isIndexBuilt(keyspace, table + '.' + index.name)) +- { +- SystemKeyspace.setIndexBuilt(keyspace, index.name); +- SystemKeyspace.setIndexRemoved(keyspace, table + '.' + index.name); +- } +- } +- +- static void unloadLegacySchemaTables() +- { +- KeyspaceMetadata systemKeyspace = Schema.instance.getKSMetaData(SystemKeyspace.NAME); +- +- Tables systemTables = systemKeyspace.tables; +- for (CFMetaData table : LegacySchemaTables) +- systemTables = systemTables.without(table.cfName); +- +- LegacySchemaTables.forEach(Schema.instance::unload); +- LegacySchemaTables.forEach((cfm) -> org.apache.cassandra.db.Keyspace.openAndGetStore(cfm).invalidate()); +- +- Schema.instance.setKeyspaceMetadata(systemKeyspace.withSwapped(systemTables)); +- } +- +- private static void truncateLegacySchemaTables() +- { +- LegacySchemaTables.forEach(table -> Schema.instance.getColumnFamilyStoreInstance(table.cfId).truncateBlocking()); +- } +- +- private static void storeKeyspaceInNewSchemaTables(Keyspace keyspace) +- { +- logger.info("Migrating keyspace {}", keyspace); +- +- Mutation.SimpleBuilder builder = SchemaKeyspace.makeCreateKeyspaceMutation(keyspace.name, keyspace.params, keyspace.timestamp); +- for (Table table : keyspace.tables) +- SchemaKeyspace.addTableToSchemaMutation(table.metadata, true, builder.timestamp(table.timestamp)); +- +- for (Type type : keyspace.types) +- SchemaKeyspace.addTypeToSchemaMutation(type.metadata, builder.timestamp(type.timestamp)); +- +- for (Function function : keyspace.functions) +- SchemaKeyspace.addFunctionToSchemaMutation(function.metadata, builder.timestamp(function.timestamp)); +- +- for (Aggregate aggregate : keyspace.aggregates) +- 
SchemaKeyspace.addAggregateToSchemaMutation(aggregate.metadata, builder.timestamp(aggregate.timestamp)); +- +- builder.build().apply(); +- } +- +- /* +- * Read all keyspaces metadata (including nested tables, types, and functions), with their modification timestamps +- */ +- private static Collection readSchema() +- { +- String query = format("SELECT keyspace_name FROM %s.%s", SystemKeyspace.NAME, SystemKeyspace.LEGACY_KEYSPACES); +- Collection keyspaceNames = new ArrayList<>(); +- query(query).forEach(row -> keyspaceNames.add(row.getString("keyspace_name"))); +- keyspaceNames.removeAll(Schema.SYSTEM_KEYSPACE_NAMES); +- +- Collection keyspaces = new ArrayList<>(); +- keyspaceNames.forEach(name -> keyspaces.add(readKeyspace(name))); +- return keyspaces; +- } +- +- private static Keyspace readKeyspace(String keyspaceName) +- { +- long timestamp = readKeyspaceTimestamp(keyspaceName); +- KeyspaceParams params = readKeyspaceParams(keyspaceName); +- +- Collection tables = readTables(keyspaceName); +- Collection types = readTypes(keyspaceName); +- Collection functions = readFunctions(keyspaceName); +- Functions.Builder functionsBuilder = Functions.builder(); +- functions.forEach(udf -> functionsBuilder.add(udf.metadata)); +- Collection aggregates = readAggregates(functionsBuilder.build(), keyspaceName); +- +- return new Keyspace(timestamp, keyspaceName, params, tables, types, functions, aggregates); +- } +- +- /* +- * Reading keyspace params +- */ +- +- private static long readKeyspaceTimestamp(String keyspaceName) +- { +- String query = format("SELECT writeTime(durable_writes) AS timestamp FROM %s.%s WHERE keyspace_name = ?", +- SystemKeyspace.NAME, +- SystemKeyspace.LEGACY_KEYSPACES); +- return query(query, keyspaceName).one().getLong("timestamp"); +- } +- +- private static KeyspaceParams readKeyspaceParams(String keyspaceName) +- { +- String query = format("SELECT * FROM %s.%s WHERE keyspace_name = ?", +- SystemKeyspace.NAME, +- SystemKeyspace.LEGACY_KEYSPACES); +- 
UntypedResultSet.Row row = query(query, keyspaceName).one(); +- +- boolean durableWrites = row.getBoolean("durable_writes"); +- +- Map replication = new HashMap<>(); +- replication.putAll(fromJsonMap(row.getString("strategy_options"))); +- replication.put(ReplicationParams.CLASS, row.getString("strategy_class")); +- +- return KeyspaceParams.create(durableWrites, replication); +- } +- +- /* +- * Reading tables +- */ +- +- private static Collection
    readTables(String keyspaceName) +- { +- String query = format("SELECT columnfamily_name FROM %s.%s WHERE keyspace_name = ?", +- SystemKeyspace.NAME, +- SystemKeyspace.LEGACY_COLUMNFAMILIES); +- Collection tableNames = new ArrayList<>(); +- query(query, keyspaceName).forEach(row -> tableNames.add(row.getString("columnfamily_name"))); +- +- Collection
    tables = new ArrayList<>(); +- tableNames.forEach(name -> tables.add(readTable(keyspaceName, name))); +- return tables; +- } +- +- private static Table readTable(String keyspaceName, String tableName) +- { +- long timestamp = readTableTimestamp(keyspaceName, tableName); +- CFMetaData metadata = readTableMetadata(keyspaceName, tableName); +- return new Table(timestamp, metadata); +- } +- +- private static long readTableTimestamp(String keyspaceName, String tableName) +- { +- String query = format("SELECT writeTime(type) AS timestamp FROM %s.%s WHERE keyspace_name = ? AND columnfamily_name = ?", +- SystemKeyspace.NAME, +- SystemKeyspace.LEGACY_COLUMNFAMILIES); +- return query(query, keyspaceName, tableName).one().getLong("timestamp"); +- } +- +- private static CFMetaData readTableMetadata(String keyspaceName, String tableName) +- { +- String tableQuery = format("SELECT * FROM %s.%s WHERE keyspace_name = ? AND columnfamily_name = ?", +- SystemKeyspace.NAME, +- SystemKeyspace.LEGACY_COLUMNFAMILIES); +- UntypedResultSet.Row tableRow = query(tableQuery, keyspaceName, tableName).one(); +- +- String columnsQuery = format("SELECT * FROM %s.%s WHERE keyspace_name = ? AND columnfamily_name = ?", +- SystemKeyspace.NAME, +- SystemKeyspace.LEGACY_COLUMNS); +- UntypedResultSet columnRows = query(columnsQuery, keyspaceName, tableName); +- +- String triggersQuery = format("SELECT * FROM %s.%s WHERE keyspace_name = ? 
AND columnfamily_name = ?", +- SystemKeyspace.NAME, +- SystemKeyspace.LEGACY_TRIGGERS); +- UntypedResultSet triggerRows = query(triggersQuery, keyspaceName, tableName); +- +- return decodeTableMetadata(tableRow, columnRows, triggerRows); +- } +- +- private static CFMetaData decodeTableMetadata(UntypedResultSet.Row tableRow, +- UntypedResultSet columnRows, +- UntypedResultSet triggerRows) +- { +- String ksName = tableRow.getString("keyspace_name"); +- String cfName = tableRow.getString("columnfamily_name"); +- +- AbstractType rawComparator = TypeParser.parse(tableRow.getString("comparator")); +- AbstractType subComparator = tableRow.has("subcomparator") ? TypeParser.parse(tableRow.getString("subcomparator")) : null; +- +- boolean isSuper = "super".equals(tableRow.getString("type").toLowerCase()); +- boolean isCompound = rawComparator instanceof CompositeType || isSuper; +- +- /* +- * Determine whether or not the table is *really* dense +- * We cannot trust is_dense value of true (see CASSANDRA-11502, that fixed the issue for 2.2 only, and not retroactively), +- * but we can trust is_dense value of false. +- */ +- Boolean rawIsDense = tableRow.has("is_dense") ? tableRow.getBoolean("is_dense") : null; +- boolean isDense; +- if (rawIsDense != null && !rawIsDense) +- isDense = false; +- else +- isDense = calculateIsDense(rawComparator, columnRows); +- +- // now, if switched to sparse, remove redundant compact_value column and the last clustering column, +- // directly copying CASSANDRA-11502 logic. See CASSANDRA-11315. +- Iterable filteredColumnRows = !isDense && (rawIsDense == null || rawIsDense) +- ? 
filterOutRedundantRowsForSparse(columnRows, isSuper, isCompound) +- : columnRows; +- +- // We don't really use the default validator but as we have it for backward compatibility, we use it to know if it's a counter table +- AbstractType defaultValidator = TypeParser.parse(tableRow.getString("default_validator")); +- boolean isCounter = defaultValidator instanceof CounterColumnType; +- +- /* +- * With CASSANDRA-5202 we stopped inferring the cf id from the combination of keyspace/table names, +- * and started storing the generated uuids in system.schema_columnfamilies. +- * +- * In 3.0 we SHOULD NOT see tables like that (2.0-created, non-upgraded). +- * But in the off-chance that we do, we generate the deterministic uuid here. +- */ +- UUID cfId = tableRow.has("cf_id") +- ? tableRow.getUUID("cf_id") +- : CFMetaData.generateLegacyCfId(ksName, cfName); +- +- boolean isCQLTable = !isSuper && !isDense && isCompound; +- boolean isStaticCompactTable = !isDense && !isCompound; +- +- // Internally, compact tables have a specific layout, see CompactTables. But when upgrading from +- // previous versions, they may not have the expected schema, so detect if we need to upgrade and do +- // it in createColumnsFromColumnRows. +- // We can remove this once we don't support upgrade from versions < 3.0. 
+- boolean needsUpgrade = !isCQLTable && checkNeedsUpgrade(filteredColumnRows, isSuper, isStaticCompactTable); +- +- List columnDefs = createColumnsFromColumnRows(filteredColumnRows, +- ksName, +- cfName, +- rawComparator, +- subComparator, +- isSuper, +- isCQLTable, +- isStaticCompactTable, +- needsUpgrade); +- +- if (needsUpgrade) +- { +- addDefinitionForUpgrade(columnDefs, +- ksName, +- cfName, +- isStaticCompactTable, +- isSuper, +- rawComparator, +- subComparator, +- defaultValidator); +- } +- +- CFMetaData cfm = CFMetaData.create(ksName, +- cfName, +- cfId, +- isDense, +- isCompound, +- isSuper, +- isCounter, +- false, // legacy schema did not contain views +- columnDefs, +- DatabaseDescriptor.getPartitioner()); +- +- Indexes indexes = createIndexesFromColumnRows(cfm, +- filteredColumnRows, +- ksName, +- cfName, +- rawComparator, +- subComparator, +- isSuper, +- isCQLTable, +- isStaticCompactTable, +- needsUpgrade); +- cfm.indexes(indexes); +- +- if (tableRow.has("dropped_columns")) +- addDroppedColumns(cfm, rawComparator, tableRow.getMap("dropped_columns", UTF8Type.instance, LongType.instance)); +- +- return cfm.params(decodeTableParams(tableRow)) +- .triggers(createTriggersFromTriggerRows(triggerRows)); +- } +- +- /* +- * We call dense a CF for which each component of the comparator is a clustering column, i.e. no +- * component is used to store a regular column names. In other words, non-composite static "thrift" +- * and CQL3 CF are *not* dense. +- * We save whether the table is dense or not during table creation through CQL, but we don't have this +- * information for table just created through thrift, nor for table prior to CASSANDRA-7744, so this +- * method does its best to infer whether the table is dense or not based on other elements. +- */ +- private static boolean calculateIsDense(AbstractType comparator, UntypedResultSet columnRows) +- { +- /* +- * As said above, this method is only here because we need to deal with thrift upgrades. 
+- * Once a CF has been "upgraded", i.e. we've rebuilt and save its CQL3 metadata at least once, +- * then we'll have saved the "is_dense" value and will be good to go. +- * +- * But non-upgraded thrift CF (and pre-7744 CF) will have no value for "is_dense", so we need +- * to infer that information without relying on it in that case. And for the most part this is +- * easy, a CF that has at least one REGULAR definition is not dense. But the subtlety is that not +- * having a REGULAR definition may not mean dense because of CQL3 definitions that have only the +- * PRIMARY KEY defined. +- * +- * So we need to recognize those special case CQL3 table with only a primary key. If we have some +- * clustering columns, we're fine as said above. So the only problem is that we cannot decide for +- * sure if a CF without REGULAR columns nor CLUSTERING_COLUMN definition is meant to be dense, or if it +- * has been created in CQL3 by say: +- * CREATE TABLE test (k int PRIMARY KEY) +- * in which case it should not be dense. However, we can limit our margin of error by assuming we are +- * in the latter case only if the comparator is exactly CompositeType(UTF8Type). +- */ +- for (UntypedResultSet.Row columnRow : columnRows) +- if ("regular".equals(columnRow.getString("type"))) +- return false; +- +- int maxClusteringIdx = -1; +- for (UntypedResultSet.Row columnRow : columnRows) +- if ("clustering_key".equals(columnRow.getString("type"))) +- maxClusteringIdx = Math.max(maxClusteringIdx, columnRow.has("component_index") ? columnRow.getInt("component_index") : 0); +- +- return maxClusteringIdx >= 0 +- ? 
maxClusteringIdx == comparator.componentsCount() - 1 +- : !isCQL3OnlyPKComparator(comparator); +- } +- +- private static Iterable filterOutRedundantRowsForSparse(UntypedResultSet columnRows, boolean isSuper, boolean isCompound) +- { +- Collection filteredRows = new ArrayList<>(); +- for (UntypedResultSet.Row columnRow : columnRows) +- { +- String kind = columnRow.getString("type"); +- +- if ("compact_value".equals(kind)) +- continue; +- +- if ("clustering_key".equals(kind)) +- { +- int position = columnRow.has("component_index") ? columnRow.getInt("component_index") : 0; +- if (isSuper && position != 0) +- continue; +- +- if (!isSuper && !isCompound) +- continue; +- } +- +- filteredRows.add(columnRow); +- } +- +- return filteredRows; +- } +- +- private static boolean isCQL3OnlyPKComparator(AbstractType comparator) +- { +- if (!(comparator instanceof CompositeType)) +- return false; +- +- CompositeType ct = (CompositeType)comparator; +- return ct.types.size() == 1 && ct.types.get(0) instanceof UTF8Type; +- } +- +- private static TableParams decodeTableParams(UntypedResultSet.Row row) +- { +- TableParams.Builder params = TableParams.builder(); +- +- params.readRepairChance(row.getDouble("read_repair_chance")) +- .dcLocalReadRepairChance(row.getDouble("local_read_repair_chance")) +- .gcGraceSeconds(row.getInt("gc_grace_seconds")); +- +- if (row.has("comment")) +- params.comment(row.getString("comment")); +- +- if (row.has("memtable_flush_period_in_ms")) +- params.memtableFlushPeriodInMs(row.getInt("memtable_flush_period_in_ms")); +- +- params.caching(CachingParams.fromMap(fromJsonMap(row.getString("caching")))); +- +- if (row.has("default_time_to_live")) +- params.defaultTimeToLive(row.getInt("default_time_to_live")); +- +- if (row.has("speculative_retry")) +- params.speculativeRetry(SpeculativeRetryParam.fromString(row.getString("speculative_retry"))); +- +- Map compressionParameters = fromJsonMap(row.getString("compression_parameters")); +- String crcCheckChance = 
compressionParameters.remove("crc_check_chance"); +- //crc_check_chance was promoted from a compression property to a top-level property +- if (crcCheckChance != null) +- params.crcCheckChance(Double.parseDouble(crcCheckChance)); +- +- params.compression(CompressionParams.fromMap(compressionParameters)); +- +- params.compaction(compactionFromRow(row)); +- +- if (row.has("min_index_interval")) +- params.minIndexInterval(row.getInt("min_index_interval")); +- +- if (row.has("max_index_interval")) +- params.maxIndexInterval(row.getInt("max_index_interval")); +- +- if (row.has("bloom_filter_fp_chance")) +- params.bloomFilterFpChance(row.getDouble("bloom_filter_fp_chance")); +- +- return params.build(); +- } +- +- /* +- * The method is needed - to migrate max_compaction_threshold and min_compaction_threshold +- * to the compaction map, where they belong. +- * +- * We must use reflection to validate the options because not every compaction strategy respects and supports +- * the threshold params (LCS doesn't, STCS and DTCS do). 
+- */ +- @SuppressWarnings("unchecked") +- private static CompactionParams compactionFromRow(UntypedResultSet.Row row) +- { +- Class klass = +- CFMetaData.createCompactionStrategy(row.getString("compaction_strategy_class")); +- Map options = fromJsonMap(row.getString("compaction_strategy_options")); +- +- int minThreshold = row.getInt("min_compaction_threshold"); +- int maxThreshold = row.getInt("max_compaction_threshold"); +- +- Map optionsWithThresholds = new HashMap<>(options); +- optionsWithThresholds.putIfAbsent(CompactionParams.Option.MIN_THRESHOLD.toString(), Integer.toString(minThreshold)); +- optionsWithThresholds.putIfAbsent(CompactionParams.Option.MAX_THRESHOLD.toString(), Integer.toString(maxThreshold)); +- +- try +- { +- Map unrecognizedOptions = +- (Map) klass.getMethod("validateOptions", Map.class).invoke(null, optionsWithThresholds); +- +- if (unrecognizedOptions.isEmpty()) +- options = optionsWithThresholds; +- } +- catch (Exception e) +- { +- throw new RuntimeException(e); +- } +- +- return CompactionParams.create(klass, options); +- } +- +- // Should only be called on compact tables +- private static boolean checkNeedsUpgrade(Iterable defs, boolean isSuper, boolean isStaticCompactTable) +- { +- if (isSuper) +- { +- // Check if we've added the "supercolumn map" column yet or not +- for (UntypedResultSet.Row row : defs) +- if (row.getString("column_name").isEmpty()) +- return false; +- return true; +- } +- +- // For static compact tables, we need to upgrade if the regular definitions haven't been converted to static yet, +- // i.e. if we don't have a static definition yet. 
+- if (isStaticCompactTable) +- return !hasKind(defs, ColumnDefinition.Kind.STATIC); +- +- // For dense compact tables, we need to upgrade if we don't have a compact value definition +- return !hasRegularColumns(defs); +- } +- +- private static boolean hasRegularColumns(Iterable columnRows) +- { +- for (UntypedResultSet.Row row : columnRows) +- { +- /* +- * We need to special case and ignore the empty compact column (pre-3.0, COMPACT STORAGE, primary-key only tables), +- * since deserializeKind() will otherwise just return a REGULAR. +- * We want the proper EmptyType regular column to be added by addDefinitionForUpgrade(), so we need +- * checkNeedsUpgrade() to return true in this case. +- * See CASSANDRA-9874. +- */ +- if (isEmptyCompactValueColumn(row)) +- return false; +- +- if (deserializeKind(row.getString("type")) == ColumnDefinition.Kind.REGULAR) +- return true; +- } +- +- return false; +- } +- +- private static boolean isEmptyCompactValueColumn(UntypedResultSet.Row row) +- { +- return "compact_value".equals(row.getString("type")) && row.getString("column_name").isEmpty(); +- } +- +- private static void addDefinitionForUpgrade(List defs, +- String ksName, +- String cfName, +- boolean isStaticCompactTable, +- boolean isSuper, +- AbstractType rawComparator, +- AbstractType subComparator, +- AbstractType defaultValidator) +- { +- CompactTables.DefaultNames names = CompactTables.defaultNameGenerator(defs); +- +- if (isSuper) +- { +- defs.add(ColumnDefinition.regularDef(ksName, cfName, CompactTables.SUPER_COLUMN_MAP_COLUMN_STR, MapType.getInstance(subComparator, defaultValidator, true))); +- } +- else if (isStaticCompactTable) +- { +- defs.add(ColumnDefinition.clusteringDef(ksName, cfName, names.defaultClusteringName(), rawComparator, 0)); +- defs.add(ColumnDefinition.regularDef(ksName, cfName, names.defaultCompactValueName(), defaultValidator)); +- } +- else +- { +- // For dense compact tables, we get here if we don't have a compact value column, in which case 
we should add it +- // (we use EmptyType to recognize that the compact value was not declared by the use (see CreateTableStatement too)) +- defs.add(ColumnDefinition.regularDef(ksName, cfName, names.defaultCompactValueName(), EmptyType.instance)); +- } +- } +- +- private static boolean hasKind(Iterable defs, ColumnDefinition.Kind kind) +- { +- for (UntypedResultSet.Row row : defs) +- if (deserializeKind(row.getString("type")) == kind) +- return true; +- +- return false; +- } +- +- /* +- * Prior to 3.0 we used to not store the type of the dropped columns, relying on all collection info being +- * present in the comparator, forever. That allowed us to perform certain validations in AlterTableStatement +- * (namely not allowing to re-add incompatible collection columns, with the same name, but a different type). +- * +- * In 3.0, we no longer preserve the original comparator, and reconstruct it from the columns instead. That means +- * that we should preserve the type of the dropped columns now, and, during migration, fetch the types from +- * the original comparator if necessary. +- */ +- private static void addDroppedColumns(CFMetaData cfm, AbstractType comparator, Map droppedTimes) +- { +- AbstractType last = comparator.getComponents().get(comparator.componentsCount() - 1); +- Map collections = last instanceof ColumnToCollectionType +- ? ((ColumnToCollectionType) last).defined +- : Collections.emptyMap(); +- +- for (Map.Entry entry : droppedTimes.entrySet()) +- { +- String name = entry.getKey(); +- ByteBuffer nameBytes = UTF8Type.instance.decompose(name); +- long time = entry.getValue(); +- +- AbstractType type = collections.containsKey(nameBytes) +- ? 
collections.get(nameBytes) +- : BytesType.instance; +- +- cfm.getDroppedColumns().put(nameBytes, new CFMetaData.DroppedColumn(name, type, time)); +- } +- } +- +- private static List createColumnsFromColumnRows(Iterable rows, +- String keyspace, +- String table, +- AbstractType rawComparator, +- AbstractType rawSubComparator, +- boolean isSuper, +- boolean isCQLTable, +- boolean isStaticCompactTable, +- boolean needsUpgrade) +- { +- List columns = new ArrayList<>(); +- +- for (UntypedResultSet.Row row : rows) +- { +- // Skip the empty compact value column. Make addDefinitionForUpgrade() re-add the proper REGULAR one. +- if (isEmptyCompactValueColumn(row)) +- continue; +- +- columns.add(createColumnFromColumnRow(row, +- keyspace, +- table, +- rawComparator, +- rawSubComparator, +- isSuper, +- isCQLTable, +- isStaticCompactTable, +- needsUpgrade)); +- } +- +- return columns; +- } +- +- private static ColumnDefinition createColumnFromColumnRow(UntypedResultSet.Row row, +- String keyspace, +- String table, +- AbstractType rawComparator, +- AbstractType rawSubComparator, +- boolean isSuper, +- boolean isCQLTable, +- boolean isStaticCompactTable, +- boolean needsUpgrade) +- { +- String rawKind = row.getString("type"); +- +- ColumnDefinition.Kind kind = deserializeKind(rawKind); +- if (needsUpgrade && isStaticCompactTable && kind == ColumnDefinition.Kind.REGULAR) +- kind = ColumnDefinition.Kind.STATIC; +- +- int componentIndex = ColumnDefinition.NO_POSITION; +- // Note that the component_index is not useful for non-primary key parts (it never really in fact since there is +- // no particular ordering of non-PK columns, we only used to use it as a simplification but that's not needed +- // anymore) +- if (kind.isPrimaryKeyKind()) +- // We use to not have a component index when there was a single partition key, we don't anymore (#10491) +- componentIndex = row.has("component_index") ? 
row.getInt("component_index") : 0; +- +- // Note: we save the column name as string, but we should not assume that it is an UTF8 name, we +- // we need to use the comparator fromString method +- AbstractType comparator = isCQLTable +- ? UTF8Type.instance +- : CompactTables.columnDefinitionComparator(rawKind, isSuper, rawComparator, rawSubComparator); +- ColumnIdentifier name = ColumnIdentifier.getInterned(comparator.fromString(row.getString("column_name")), comparator); +- +- AbstractType validator = parseType(row.getString("validator")); +- +- // In the 2.x schema we didn't store UDT's with a FrozenType wrapper because they were implicitly frozen. After +- // CASSANDRA-7423 (non-frozen UDTs), this is no longer true, so we need to freeze UDTs and nested freezable +- // types (UDTs and collections) to properly migrate the schema. See CASSANDRA-11609 and CASSANDRA-11613. +- if (validator.isUDT() && validator.isMultiCell()) +- validator = validator.freeze(); +- else +- validator = validator.freezeNestedMulticellTypes(); +- +- return new ColumnDefinition(keyspace, table, name, validator, componentIndex, kind); +- } +- +- private static Indexes createIndexesFromColumnRows(CFMetaData cfm, +- Iterable rows, +- String keyspace, +- String table, +- AbstractType rawComparator, +- AbstractType rawSubComparator, +- boolean isSuper, +- boolean isCQLTable, +- boolean isStaticCompactTable, +- boolean needsUpgrade) +- { +- Indexes.Builder indexes = Indexes.builder(); +- +- for (UntypedResultSet.Row row : rows) +- { +- IndexMetadata.Kind kind = null; +- if (row.has("index_type")) +- kind = IndexMetadata.Kind.valueOf(row.getString("index_type")); +- +- if (kind == null) +- continue; +- +- Map indexOptions = null; +- if (row.has("index_options")) +- indexOptions = fromJsonMap(row.getString("index_options")); +- +- if (row.has("index_name")) +- { +- String indexName = row.getString("index_name"); +- +- ColumnDefinition column = createColumnFromColumnRow(row, +- keyspace, +- table, +- 
rawComparator, +- rawSubComparator, +- isSuper, +- isCQLTable, +- isStaticCompactTable, +- needsUpgrade); +- +- indexes.add(IndexMetadata.fromLegacyMetadata(cfm, column, indexName, kind, indexOptions)); +- } +- else +- { +- logger.error("Failed to find index name for legacy migration of index on {}.{}", keyspace, table); +- } +- } +- +- return indexes.build(); +- } +- +- private static ColumnDefinition.Kind deserializeKind(String kind) +- { +- if ("clustering_key".equalsIgnoreCase(kind)) +- return ColumnDefinition.Kind.CLUSTERING; +- +- if ("compact_value".equalsIgnoreCase(kind)) +- return ColumnDefinition.Kind.REGULAR; +- +- return Enum.valueOf(ColumnDefinition.Kind.class, kind.toUpperCase()); +- } +- +- private static Triggers createTriggersFromTriggerRows(UntypedResultSet rows) +- { +- Triggers.Builder triggers = org.apache.cassandra.schema.Triggers.builder(); +- rows.forEach(row -> triggers.add(createTriggerFromTriggerRow(row))); +- return triggers.build(); +- } +- +- private static TriggerMetadata createTriggerFromTriggerRow(UntypedResultSet.Row row) +- { +- String name = row.getString("trigger_name"); +- String classOption = row.getTextMap("trigger_options").get("class"); +- return new TriggerMetadata(name, classOption); +- } +- +- /* +- * Reading user types +- */ +- +- private static Collection readTypes(String keyspaceName) +- { +- String query = format("SELECT type_name FROM %s.%s WHERE keyspace_name = ?", +- SystemKeyspace.NAME, +- SystemKeyspace.LEGACY_USERTYPES); +- Collection typeNames = new ArrayList<>(); +- query(query, keyspaceName).forEach(row -> typeNames.add(row.getString("type_name"))); +- +- Collection types = new ArrayList<>(); +- typeNames.forEach(name -> types.add(readType(keyspaceName, name))); +- return types; +- } +- +- private static Type readType(String keyspaceName, String typeName) +- { +- long timestamp = readTypeTimestamp(keyspaceName, typeName); +- UserType metadata = readTypeMetadata(keyspaceName, typeName); +- return new 
Type(timestamp, metadata); +- } +- +- /* +- * Unfortunately there is not a single REGULAR column in system.schema_usertypes, so annoyingly we cannot +- * use the writeTime() CQL function, and must resort to a lower level. +- */ +- private static long readTypeTimestamp(String keyspaceName, String typeName) +- { +- ColumnFamilyStore store = org.apache.cassandra.db.Keyspace.open(SystemKeyspace.NAME) +- .getColumnFamilyStore(SystemKeyspace.LEGACY_USERTYPES); +- +- ClusteringComparator comparator = store.metadata.comparator; +- Slices slices = Slices.with(comparator, Slice.make(comparator, typeName)); +- int nowInSec = FBUtilities.nowInSeconds(); +- DecoratedKey key = store.metadata.decorateKey(AsciiType.instance.fromString(keyspaceName)); +- SinglePartitionReadCommand command = SinglePartitionReadCommand.create(store.metadata, nowInSec, key, slices); +- +- try (ReadExecutionController controller = command.executionController(); +- RowIterator partition = UnfilteredRowIterators.filter(command.queryMemtableAndDisk(store, controller), nowInSec)) +- { +- return partition.next().primaryKeyLivenessInfo().timestamp(); +- } +- } +- +- private static UserType readTypeMetadata(String keyspaceName, String typeName) +- { +- String query = format("SELECT * FROM %s.%s WHERE keyspace_name = ? 
AND type_name = ?", +- SystemKeyspace.NAME, +- SystemKeyspace.LEGACY_USERTYPES); +- UntypedResultSet.Row row = query(query, keyspaceName, typeName).one(); +- +- List names = +- row.getList("field_names", UTF8Type.instance) +- .stream() +- .map(t -> FieldIdentifier.forInternalString(t)) +- .collect(Collectors.toList()); +- +- List> types = +- row.getList("field_types", UTF8Type.instance) +- .stream() +- .map(LegacySchemaMigrator::parseType) +- .collect(Collectors.toList()); +- +- return new UserType(keyspaceName, bytes(typeName), names, types, true); +- } +- +- /* +- * Reading UDFs +- */ +- +- private static Collection readFunctions(String keyspaceName) +- { +- String query = format("SELECT function_name, signature FROM %s.%s WHERE keyspace_name = ?", +- SystemKeyspace.NAME, +- SystemKeyspace.LEGACY_FUNCTIONS); +- HashMultimap> functionSignatures = HashMultimap.create(); +- query(query, keyspaceName).forEach(row -> functionSignatures.put(row.getString("function_name"), row.getList("signature", UTF8Type.instance))); +- +- Collection functions = new ArrayList<>(); +- functionSignatures.entries().forEach(pair -> functions.add(readFunction(keyspaceName, pair.getKey(), pair.getValue()))); +- return functions; +- } +- +- private static Function readFunction(String keyspaceName, String functionName, List signature) +- { +- long timestamp = readFunctionTimestamp(keyspaceName, functionName, signature); +- UDFunction metadata = readFunctionMetadata(keyspaceName, functionName, signature); +- return new Function(timestamp, metadata); +- } +- +- private static long readFunctionTimestamp(String keyspaceName, String functionName, List signature) +- { +- String query = format("SELECT writeTime(return_type) AS timestamp " + +- "FROM %s.%s " + +- "WHERE keyspace_name = ? AND function_name = ? 
AND signature = ?", +- SystemKeyspace.NAME, +- SystemKeyspace.LEGACY_FUNCTIONS); +- return query(query, keyspaceName, functionName, signature).one().getLong("timestamp"); +- } +- +- private static UDFunction readFunctionMetadata(String keyspaceName, String functionName, List signature) +- { +- String query = format("SELECT * FROM %s.%s WHERE keyspace_name = ? AND function_name = ? AND signature = ?", +- SystemKeyspace.NAME, +- SystemKeyspace.LEGACY_FUNCTIONS); +- UntypedResultSet.Row row = query(query, keyspaceName, functionName, signature).one(); +- +- FunctionName name = new FunctionName(keyspaceName, functionName); +- +- List argNames = new ArrayList<>(); +- if (row.has("argument_names")) +- for (String arg : row.getList("argument_names", UTF8Type.instance)) +- argNames.add(new ColumnIdentifier(arg, true)); +- +- List> argTypes = new ArrayList<>(); +- if (row.has("argument_types")) +- for (String type : row.getList("argument_types", UTF8Type.instance)) +- argTypes.add(parseType(type)); +- +- AbstractType returnType = parseType(row.getString("return_type")); +- +- String language = row.getString("language"); +- String body = row.getString("body"); +- boolean calledOnNullInput = row.getBoolean("called_on_null_input"); +- +- try +- { +- return UDFunction.create(name, argNames, argTypes, returnType, calledOnNullInput, language, body); +- } +- catch (InvalidRequestException e) +- { +- return UDFunction.createBrokenFunction(name, argNames, argTypes, returnType, calledOnNullInput, language, body, e); +- } +- } +- +- /* +- * Reading UDAs +- */ +- +- private static Collection readAggregates(Functions functions, String keyspaceName) +- { +- String query = format("SELECT aggregate_name, signature FROM %s.%s WHERE keyspace_name = ?", +- SystemKeyspace.NAME, +- SystemKeyspace.LEGACY_AGGREGATES); +- HashMultimap> aggregateSignatures = HashMultimap.create(); +- query(query, keyspaceName).forEach(row -> aggregateSignatures.put(row.getString("aggregate_name"), 
row.getList("signature", UTF8Type.instance))); +- +- Collection aggregates = new ArrayList<>(); +- aggregateSignatures.entries().forEach(pair -> aggregates.add(readAggregate(functions, keyspaceName, pair.getKey(), pair.getValue()))); +- return aggregates; +- } +- +- private static Aggregate readAggregate(Functions functions, String keyspaceName, String aggregateName, List signature) +- { +- long timestamp = readAggregateTimestamp(keyspaceName, aggregateName, signature); +- UDAggregate metadata = readAggregateMetadata(functions, keyspaceName, aggregateName, signature); +- return new Aggregate(timestamp, metadata); +- } +- +- private static long readAggregateTimestamp(String keyspaceName, String aggregateName, List signature) +- { +- String query = format("SELECT writeTime(return_type) AS timestamp " + +- "FROM %s.%s " + +- "WHERE keyspace_name = ? AND aggregate_name = ? AND signature = ?", +- SystemKeyspace.NAME, +- SystemKeyspace.LEGACY_AGGREGATES); +- return query(query, keyspaceName, aggregateName, signature).one().getLong("timestamp"); +- } +- +- private static UDAggregate readAggregateMetadata(Functions functions, String keyspaceName, String functionName, List signature) +- { +- String query = format("SELECT * FROM %s.%s WHERE keyspace_name = ? AND aggregate_name = ? 
AND signature = ?", +- SystemKeyspace.NAME, +- SystemKeyspace.LEGACY_AGGREGATES); +- UntypedResultSet.Row row = query(query, keyspaceName, functionName, signature).one(); +- +- FunctionName name = new FunctionName(keyspaceName, functionName); +- +- List types = row.getList("argument_types", UTF8Type.instance); +- +- List> argTypes = new ArrayList<>(); +- if (types != null) +- { +- argTypes = new ArrayList<>(types.size()); +- for (String type : types) +- argTypes.add(parseType(type)); +- } +- +- AbstractType returnType = parseType(row.getString("return_type")); +- +- FunctionName stateFunc = new FunctionName(keyspaceName, row.getString("state_func")); +- AbstractType stateType = parseType(row.getString("state_type")); +- FunctionName finalFunc = row.has("final_func") ? new FunctionName(keyspaceName, row.getString("final_func")) : null; +- ByteBuffer initcond = row.has("initcond") ? row.getBytes("initcond") : null; +- +- try +- { +- return UDAggregate.create(functions, name, argTypes, returnType, stateFunc, finalFunc, stateType, initcond); +- } +- catch (InvalidRequestException reason) +- { +- return UDAggregate.createBroken(name, argTypes, returnType, initcond, reason); +- } +- } +- +- private static UntypedResultSet query(String query, Object... values) +- { +- return QueryProcessor.executeOnceInternal(query, values); +- } +- +- private static AbstractType parseType(String str) +- { +- return TypeParser.parse(str); +- } +- +- private static final class Keyspace +- { +- final long timestamp; +- final String name; +- final KeyspaceParams params; +- final Collection
    tables; +- final Collection types; +- final Collection functions; +- final Collection aggregates; +- +- Keyspace(long timestamp, +- String name, +- KeyspaceParams params, +- Collection
    tables, +- Collection types, +- Collection functions, +- Collection aggregates) +- { +- this.timestamp = timestamp; +- this.name = name; +- this.params = params; +- this.tables = tables; +- this.types = types; +- this.functions = functions; +- this.aggregates = aggregates; +- } +- } +- +- private static final class Table +- { +- final long timestamp; +- final CFMetaData metadata; +- +- Table(long timestamp, CFMetaData metadata) +- { +- this.timestamp = timestamp; +- this.metadata = metadata; +- } +- } +- +- private static final class Type +- { +- final long timestamp; +- final UserType metadata; +- +- Type(long timestamp, UserType metadata) +- { +- this.timestamp = timestamp; +- this.metadata = metadata; +- } +- } +- +- private static final class Function +- { +- final long timestamp; +- final UDFunction metadata; +- +- Function(long timestamp, UDFunction metadata) +- { +- this.timestamp = timestamp; +- this.metadata = metadata; +- } +- } +- +- private static final class Aggregate +- { +- final long timestamp; +- final UDAggregate metadata; +- +- Aggregate(long timestamp, UDAggregate metadata) +- { +- this.timestamp = timestamp; +- this.metadata = metadata; +- } +- } +-} +diff --git a/src/java/org/apache/cassandra/service/CacheService.java b/src/java/org/apache/cassandra/service/CacheService.java +index 625d687..762c6b2 100644 +--- a/src/java/org/apache/cassandra/service/CacheService.java ++++ b/src/java/org/apache/cassandra/service/CacheService.java +@@ -40,7 +40,6 @@ import org.apache.cassandra.cache.*; + import org.apache.cassandra.cache.AutoSavingCache.CacheSerializer; + import org.apache.cassandra.concurrent.Stage; + import org.apache.cassandra.concurrent.StageManager; +-import org.apache.cassandra.config.ColumnDefinition; + import org.apache.cassandra.config.DatabaseDescriptor; + import org.apache.cassandra.db.*; + import org.apache.cassandra.db.rows.*; +@@ -50,7 +49,6 @@ import org.apache.cassandra.db.partitions.CachedPartition; + import 
org.apache.cassandra.db.context.CounterContext; + import org.apache.cassandra.io.util.DataInputPlus; + import org.apache.cassandra.io.util.DataOutputPlus; +-import org.apache.cassandra.io.sstable.format.big.BigFormat; + import org.apache.cassandra.utils.ByteBufferUtil; + import org.apache.cassandra.utils.FBUtilities; + import org.apache.cassandra.utils.Pair; +@@ -358,58 +356,25 @@ public class CacheService implements CacheServiceMBean + { + assert(cfs.metadata.isCounter()); + out.write(cfs.metadata.ksAndCFBytes); +- ByteBufferUtil.writeWithLength(key.partitionKey, out); +- ByteBufferUtil.writeWithLength(key.cellName, out); ++ key.write(out); + } + + public Future> deserialize(DataInputPlus in, final ColumnFamilyStore cfs) throws IOException + { + //Keyspace and CF name are deserialized by AutoSaving cache and used to fetch the CFS provided as a + //parameter so they aren't deserialized here, even though they are serialized by this serializer +- final ByteBuffer partitionKey = ByteBufferUtil.readWithLength(in); +- final ByteBuffer cellName = ByteBufferUtil.readWithLength(in); ++ final CounterCacheKey cacheKey = CounterCacheKey.read(cfs.metadata.ksAndCFName, in); + if (cfs == null || !cfs.metadata.isCounter() || !cfs.isCounterCacheEnabled()) + return null; +- assert(cfs.metadata.isCounter()); ++ + return StageManager.getStage(Stage.READ).submit(new Callable>() + { + public Pair call() throws Exception + { +- DecoratedKey key = cfs.decorateKey(partitionKey); +- LegacyLayout.LegacyCellName name = LegacyLayout.decodeCellName(cfs.metadata, cellName); +- ColumnDefinition column = name.column; +- CellPath path = name.collectionElement == null ? 
null : CellPath.create(name.collectionElement); +- +- int nowInSec = FBUtilities.nowInSeconds(); +- ColumnFilter.Builder builder = ColumnFilter.selectionBuilder(); +- if (path == null) +- builder.add(column); +- else +- builder.select(column, path); +- +- ClusteringIndexFilter filter = new ClusteringIndexNamesFilter(FBUtilities.singleton(name.clustering, cfs.metadata.comparator), false); +- SinglePartitionReadCommand cmd = SinglePartitionReadCommand.create(cfs.metadata, nowInSec, key, builder.build(), filter); +- try (ReadExecutionController controller = cmd.executionController(); +- RowIterator iter = UnfilteredRowIterators.filter(cmd.queryMemtableAndDisk(cfs, controller), nowInSec)) +- { +- Cell cell; +- if (column.isStatic()) +- { +- cell = iter.staticRow().getCell(column); +- } +- else +- { +- if (!iter.hasNext()) +- return null; +- cell = iter.next().getCell(column); +- } +- +- if (cell == null) +- return null; +- +- ClockAndCount clockAndCount = CounterContext.instance().getLocalClockAndCount(cell.value()); +- return Pair.create(CounterCacheKey.create(cfs.metadata.ksAndCFName, partitionKey, name.clustering, column, path), clockAndCount); +- } ++ ByteBuffer value = cacheKey.readCounterValue(cfs); ++ return value == null ++ ? null ++ : Pair.create(cacheKey, CounterContext.instance().getLocalClockAndCount(value)); + } + }); + } +@@ -492,7 +457,7 @@ public class CacheService implements CacheServiceMBean + // wrong is during upgrade, in which case we fail at deserialization. This is not a huge deal however since 1) this is unlikely enough that + // this won't affect many users (if any) and only once, 2) this doesn't prevent the node from starting and 3) CASSANDRA-10219 shows that this + // part of the code has been broken for a while without anyone noticing (it is, btw, still broken until CASSANDRA-10219 is fixed). 
+- RowIndexEntry.Serializer.skipForCache(input, BigFormat.instance.getLatestVersion()); ++ RowIndexEntry.Serializer.skipForCache(input); + return null; + } + RowIndexEntry.IndexSerializer indexSerializer = reader.descriptor.getFormat().getIndexSerializer(reader.metadata, +diff --git a/src/java/org/apache/cassandra/service/CassandraDaemon.java b/src/java/org/apache/cassandra/service/CassandraDaemon.java +index cd98af0..92d12f2 100644 +--- a/src/java/org/apache/cassandra/service/CassandraDaemon.java ++++ b/src/java/org/apache/cassandra/service/CassandraDaemon.java +@@ -68,8 +68,6 @@ import org.apache.cassandra.io.util.FileUtils; + import org.apache.cassandra.metrics.CassandraMetricsRegistry; + import org.apache.cassandra.metrics.DefaultNameFactory; + import org.apache.cassandra.metrics.StorageMetrics; +-import org.apache.cassandra.schema.LegacySchemaMigrator; +-import org.apache.cassandra.thrift.ThriftServer; + import org.apache.cassandra.tracing.Tracing; + import org.apache.cassandra.utils.*; + +@@ -154,7 +152,6 @@ public class CassandraDaemon + + private static final CassandraDaemon instance = new CassandraDaemon(); + +- public Server thriftServer; + private NativeTransportService nativeTransportService; + private JMXConnectorServer jmxServer; + +@@ -246,13 +243,6 @@ public class CassandraDaemon + } + }); + +- /* +- * Migrate pre-3.0 keyspaces, tables, types, functions, and aggregates, to their new 3.0 storage. +- * We don't (and can't) wait for commit log replay here, but we don't need to - all schema changes force +- * explicit memtable flushes. 
+- */ +- LegacySchemaMigrator.migrate(); +- + // Populate token metadata before flushing, for token-aware sstable partitioning (#6696) + StorageService.instance.populateTokenMetadata(); + +@@ -417,12 +407,6 @@ public class CassandraDaemon + if (sizeRecorderInterval > 0) + ScheduledExecutors.optionalTasks.scheduleWithFixedDelay(SizeEstimatesRecorder.instance, 30, sizeRecorderInterval, TimeUnit.SECONDS); + +- // Thrift +- InetAddress rpcAddr = DatabaseDescriptor.getRpcAddress(); +- int rpcPort = DatabaseDescriptor.getRpcPort(); +- int listenBacklog = DatabaseDescriptor.getRpcListenBacklog(); +- thriftServer = new ThriftServer(rpcAddr, rpcPort, listenBacklog); +- + // Native transport + nativeTransportService = new NativeTransportService(); + +@@ -513,12 +497,6 @@ public class CassandraDaemon + } + else + logger.info("Not starting native transport as requested. Use JMX (StorageService->startNativeTransport()) or nodetool (enablebinary) to start it"); +- +- String rpcFlag = System.getProperty("cassandra.start_rpc"); +- if ((rpcFlag != null && Boolean.parseBoolean(rpcFlag)) || (rpcFlag == null && DatabaseDescriptor.startRpc())) +- thriftServer.start(); +- else +- logger.info("Not starting RPC server as requested. Use JMX (StorageService->startRPCServer()) or nodetool (enablethrift) to start it"); + } + + /** +@@ -531,8 +509,6 @@ public class CassandraDaemon + // On linux, this doesn't entirely shut down Cassandra, just the RPC server. 
+ // jsvc takes care of taking the rest down + logger.info("Cassandra shutting down..."); +- if (thriftServer != null) +- thriftServer.stop(); + if (nativeTransportService != null) + nativeTransportService.destroy(); + StorageService.instance.setRpcReady(false); +diff --git a/src/java/org/apache/cassandra/service/ClientState.java b/src/java/org/apache/cassandra/service/ClientState.java +index b131701..d3aab64 100644 +--- a/src/java/org/apache/cassandra/service/ClientState.java ++++ b/src/java/org/apache/cassandra/service/ClientState.java +@@ -34,13 +34,13 @@ import org.apache.cassandra.config.DatabaseDescriptor; + import org.apache.cassandra.config.Schema; + import org.apache.cassandra.cql3.QueryHandler; + import org.apache.cassandra.cql3.QueryProcessor; ++import org.apache.cassandra.cql3.Validation; + import org.apache.cassandra.cql3.functions.Function; + import org.apache.cassandra.db.SystemKeyspace; + import org.apache.cassandra.exceptions.AuthenticationException; + import org.apache.cassandra.exceptions.InvalidRequestException; + import org.apache.cassandra.exceptions.UnauthorizedException; + import org.apache.cassandra.schema.SchemaKeyspace; +-import org.apache.cassandra.thrift.ThriftValidation; + import org.apache.cassandra.tracing.TraceKeyspace; + import org.apache.cassandra.utils.FBUtilities; + import org.apache.cassandra.utils.JVMStabilityInspector; +@@ -148,7 +148,7 @@ public class ClientState + } + + /** +- * @return a ClientState object for external clients (thrift/native protocol users). ++ * @return a ClientState object for external clients (native protocol users). 
+ */ + public static ClientState forExternalCalls(SocketAddress remoteAddress) + { +@@ -290,7 +290,7 @@ public class ClientState + public void hasColumnFamilyAccess(String keyspace, String columnFamily, Permission perm) + throws UnauthorizedException, InvalidRequestException + { +- ThriftValidation.validateColumnFamily(keyspace, columnFamily); ++ Validation.validateColumnFamily(keyspace, columnFamily); + hasAccess(keyspace, perm, DataResource.table(keyspace, columnFamily)); + } + +diff --git a/src/java/org/apache/cassandra/service/EmbeddedCassandraService.java b/src/java/org/apache/cassandra/service/EmbeddedCassandraService.java +index 659d851..f852b08 100644 +--- a/src/java/org/apache/cassandra/service/EmbeddedCassandraService.java ++++ b/src/java/org/apache/cassandra/service/EmbeddedCassandraService.java +@@ -22,8 +22,7 @@ import java.io.IOException; + import org.apache.cassandra.service.CassandraDaemon; + + /** +- * An embedded, in-memory cassandra storage service that listens +- * on the thrift interface as configured in cassandra.yaml ++ * An embedded, in-memory cassandra storage service. + * This kind of service is useful when running unit tests of + * services using cassandra for example. + * +diff --git a/src/java/org/apache/cassandra/service/StorageProxy.java b/src/java/org/apache/cassandra/service/StorageProxy.java +index 6375be6..b956e68 100644 +--- a/src/java/org/apache/cassandra/service/StorageProxy.java ++++ b/src/java/org/apache/cassandra/service/StorageProxy.java +@@ -1851,9 +1851,10 @@ public class StorageProxy implements StorageProxyMBean + } + + /** +- * Estimate the number of result rows (either cql3 rows or "thrift" rows, as called for by the command) per +- * range in the ring based on our local data. This assumes that ranges are uniformly distributed across the cluster +- * and that the queried data is also uniformly distributed. ++ * Estimate the number of result rows per range in the ring based on our local data. ++ *

    ++ * This assumes that ranges are uniformly distributed across the cluster and ++ * that the queried data is also uniformly distributed. + */ + private static float estimateResultsPerRange(PartitionRangeReadCommand command, Keyspace keyspace) + { +diff --git a/src/java/org/apache/cassandra/service/StorageService.java b/src/java/org/apache/cassandra/service/StorageService.java +index 9289ce7..127ca30 100644 +--- a/src/java/org/apache/cassandra/service/StorageService.java ++++ b/src/java/org/apache/cassandra/service/StorageService.java +@@ -80,9 +80,6 @@ import org.apache.cassandra.service.paxos.CommitVerbHandler; + import org.apache.cassandra.service.paxos.PrepareVerbHandler; + import org.apache.cassandra.service.paxos.ProposeVerbHandler; + import org.apache.cassandra.streaming.*; +-import org.apache.cassandra.thrift.EndpointDetails; +-import org.apache.cassandra.thrift.TokenRange; +-import org.apache.cassandra.thrift.cassandraConstants; + import org.apache.cassandra.tracing.TraceKeyspace; + import org.apache.cassandra.utils.*; + import org.apache.cassandra.utils.progress.ProgressEvent; +@@ -321,35 +318,6 @@ public class StorageService extends NotificationBroadcasterSupport implements IE + return Gossiper.instance.isEnabled(); + } + +- // should only be called via JMX +- public void startRPCServer() +- { +- if (daemon == null) +- { +- throw new IllegalStateException("No configured daemon"); +- } +- daemon.thriftServer.start(); +- } +- +- public void stopRPCServer() +- { +- if (daemon == null) +- { +- throw new IllegalStateException("No configured daemon"); +- } +- if (daemon.thriftServer != null) +- daemon.thriftServer.stop(); +- } +- +- public boolean isRPCServerRunning() +- { +- if ((daemon == null) || (daemon.thriftServer == null)) +- { +- return false; +- } +- return daemon.thriftServer.isRunning(); +- } +- + public void startNativeTransport() + { + if (daemon == null) +@@ -392,11 +360,6 @@ public class StorageService extends NotificationBroadcasterSupport 
implements IE + logger.error("Stopping gossiper"); + stopGossiping(); + } +- if (isRPCServerRunning()) +- { +- logger.error("Stopping RPC server"); +- stopRPCServer(); +- } + if (isNativeTransportRunning()) + { + logger.error("Stopping native transport"); +@@ -406,7 +369,6 @@ public class StorageService extends NotificationBroadcasterSupport implements IE + + private void shutdownClientServers() + { +- stopRPCServer(); + stopNativeTransport(); + } + +@@ -551,7 +513,6 @@ public class StorageService extends NotificationBroadcasterSupport implements IE + public synchronized void initServer(int delay) throws ConfigurationException + { + logger.info("Cassandra version: {}", FBUtilities.getReleaseVersionString()); +- logger.info("Thrift API version: {}", cassandraConstants.VERSION); + logger.info("CQL supported versions: {} (default: {})", + StringUtils.join(ClientState.getCQLSupportedVersion(), ","), ClientState.DEFAULT_CQL_VERSION); + +@@ -1675,32 +1636,7 @@ public class StorageService extends NotificationBroadcasterSupport implements IE + : getRangeToAddressMap(keyspace); + + for (Map.Entry, List> entry : rangeToAddressMap.entrySet()) +- { +- Range range = entry.getKey(); +- List addresses = entry.getValue(); +- List endpoints = new ArrayList<>(addresses.size()); +- List rpc_endpoints = new ArrayList<>(addresses.size()); +- List epDetails = new ArrayList<>(addresses.size()); +- +- for (InetAddress endpoint : addresses) +- { +- EndpointDetails details = new EndpointDetails(); +- details.host = endpoint.getHostAddress(); +- details.datacenter = DatabaseDescriptor.getEndpointSnitch().getDatacenter(endpoint); +- details.rack = DatabaseDescriptor.getEndpointSnitch().getRack(endpoint); +- +- endpoints.add(details.host); +- rpc_endpoints.add(getRpcaddress(endpoint)); +- +- epDetails.add(details); +- } +- +- TokenRange tr = new TokenRange(tf.toString(range.left.getToken()), tf.toString(range.right.getToken()), endpoints) +- .setEndpoint_details(epDetails) +- 
.setRpc_endpoints(rpc_endpoints); +- +- ranges.add(tr); +- } ++ ranges.add(TokenRange.create(tf, entry.getKey(), entry.getValue())); + + return ranges; + } +diff --git a/src/java/org/apache/cassandra/service/StorageServiceMBean.java b/src/java/org/apache/cassandra/service/StorageServiceMBean.java +index f7da817..9cd872b 100644 +--- a/src/java/org/apache/cassandra/service/StorageServiceMBean.java ++++ b/src/java/org/apache/cassandra/service/StorageServiceMBean.java +@@ -481,15 +481,6 @@ public interface StorageServiceMBean extends NotificationEmitter + // to determine if initialization has completed + public boolean isInitialized(); + +- // allows a user to disable thrift +- public void stopRPCServer(); +- +- // allows a user to reenable thrift +- public void startRPCServer(); +- +- // to determine if thrift is running +- public boolean isRPCServerRunning(); +- + public void stopNativeTransport(); + public void startNativeTransport(); + public boolean isNativeTransportRunning(); +@@ -590,7 +581,7 @@ public interface StorageServiceMBean extends NotificationEmitter + public void resetLocalSchema() throws IOException; + + /** +- * Enables/Disables tracing for the whole system. Only thrift requests can start tracing currently. ++ * Enables/Disables tracing for the whole system. + * + * @param probability + * ]0,1[ will enable tracing on a partial number of requests with the provided probability. 0 will +diff --git a/src/java/org/apache/cassandra/service/TokenRange.java b/src/java/org/apache/cassandra/service/TokenRange.java +new file mode 100644 +index 0000000..0e46910 +--- /dev/null ++++ b/src/java/org/apache/cassandra/service/TokenRange.java +@@ -0,0 +1,119 @@ ++/* ++ * Licensed to the Apache Software Foundation (ASF) under one ++ * or more contributor license agreements. See the NOTICE file ++ * distributed with this work for additional information ++ * regarding copyright ownership. 
The ASF licenses this file ++ * to you under the Apache License, Version 2.0 (the ++ * "License"); you may not use this file except in compliance ++ * with the License. You may obtain a copy of the License at ++ * ++ * http://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ */ ++package org.apache.cassandra.service; ++ ++import java.net.InetAddress; ++import java.util.*; ++ ++import org.apache.cassandra.config.DatabaseDescriptor; ++import org.apache.cassandra.dht.Range; ++import org.apache.cassandra.dht.Token; ++import org.apache.cassandra.locator.IEndpointSnitch; ++ ++/** ++ * Holds token range informations for the sake of {@link StorageService#describeRing}. ++ * ++ * This class mostly exists for the sake of {@link StorageService#describeRing}, ++ * which used to rely on a thrift class which this is the equivalent of. This is ++ * the reason this class behave how it does and the reason for the format ++ * of {@code toString()} in particular (used by ++ * {@link StorageService#describeRingJMX}). This class probably have no other ++ * good uses than providing backward compatibility. 
++ */ ++public class TokenRange ++{ ++ private final Token.TokenFactory tokenFactory; ++ ++ public final Range range; ++ public final List endpoints; ++ ++ private TokenRange(Token.TokenFactory tokenFactory, Range range, List endpoints) ++ { ++ this.tokenFactory = tokenFactory; ++ this.range = range; ++ this.endpoints = endpoints; ++ } ++ ++ private String toStr(Token tk) ++ { ++ return tokenFactory.toString(tk); ++ } ++ ++ public static TokenRange create(Token.TokenFactory tokenFactory, Range range, List endpoints) ++ { ++ List details = new ArrayList<>(endpoints.size()); ++ IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch(); ++ for (InetAddress ep : endpoints) ++ details.add(new EndpointDetails(ep, ++ StorageService.instance.getRpcaddress(ep), ++ snitch.getDatacenter(ep), ++ snitch.getRack(ep))); ++ return new TokenRange(tokenFactory, range, details); ++ } ++ ++ @Override ++ public String toString() ++ { ++ StringBuilder sb = new StringBuilder("TokenRange("); ++ ++ sb.append("start_token:").append(toStr(range.left)); ++ sb.append(", end_token:").append(toStr(range.right)); ++ ++ List hosts = new ArrayList<>(endpoints.size()); ++ List rpcs = new ArrayList<>(endpoints.size()); ++ for (EndpointDetails ep : endpoints) ++ { ++ hosts.add(ep.host.getHostAddress()); ++ rpcs.add(ep.rpcAddress); ++ } ++ ++ sb.append("endpoints:").append(hosts); ++ sb.append("rpc_endpoints:").append(rpcs); ++ sb.append("endpoint_details:").append(endpoints); ++ ++ sb.append(")"); ++ return sb.toString(); ++ } ++ ++ public static class EndpointDetails ++ { ++ public final InetAddress host; ++ public final String rpcAddress; ++ public final String datacenter; ++ public final String rack; ++ ++ private EndpointDetails(InetAddress host, String rpcAddress, String datacenter, String rack) ++ { ++ // dc and rack can be null, but host shouldn't ++ assert host != null; ++ this.host = host; ++ this.rpcAddress = rpcAddress; ++ this.datacenter = datacenter; ++ this.rack = rack; ++ } ++ ++ 
@Override ++ public String toString() ++ { ++ // Format matters for backward compatibility with describeRing() ++ String dcStr = datacenter == null ? "" : String.format(", datacenter:%s", datacenter); ++ String rackStr = rack == null ? "" : String.format(", rack:%s", rack); ++ return String.format("EndpointDetails(host:%s%s%s)", host.getHostAddress(), dcStr, rackStr); ++ } ++ } ++} +diff --git a/src/java/org/apache/cassandra/service/pager/PagingState.java b/src/java/org/apache/cassandra/service/pager/PagingState.java +index 611523f..df91609 100644 +--- a/src/java/org/apache/cassandra/service/pager/PagingState.java ++++ b/src/java/org/apache/cassandra/service/pager/PagingState.java +@@ -23,10 +23,11 @@ import java.util.*; + + import org.apache.cassandra.config.CFMetaData; + import org.apache.cassandra.db.Clustering; +-import org.apache.cassandra.db.LegacyLayout; ++import org.apache.cassandra.db.CompactTables; + import org.apache.cassandra.db.TypeSizes; + import org.apache.cassandra.db.marshal.AbstractType; + import org.apache.cassandra.db.marshal.BytesType; ++import org.apache.cassandra.db.marshal.CompositeType; + import org.apache.cassandra.db.rows.Cell; + import org.apache.cassandra.db.rows.Row; + import org.apache.cassandra.io.util.DataInputBuffer; +@@ -213,12 +214,15 @@ public class PagingState + Iterator cells = row.cellsInLegacyOrder(metadata, true).iterator(); + if (!cells.hasNext()) + { +- mark = LegacyLayout.encodeClustering(metadata, row.clustering()); ++ // If the last returned row has no cell, this means in 2.1/2.2 terms that we stopped on the row ++ // marker. Note that this shouldn't happen if the table is COMPACT. ++ assert !metadata.isCompactTable(); ++ mark = encodeCellName(metadata, row.clustering(), ByteBufferUtil.EMPTY_BYTE_BUFFER, null); + } + else + { + Cell cell = cells.next(); +- mark = LegacyLayout.encodeCellName(metadata, row.clustering(), cell.column().name.bytes, cell.column().isComplex() ? 
cell.path().get(0) : null); ++ mark = encodeCellName(metadata, row.clustering(), cell.column().name.bytes, cell.column().isComplex() ? cell.path().get(0) : null); + } + } + else +@@ -236,10 +240,84 @@ public class PagingState + return null; + + return protocolVersion <= Server.VERSION_3 +- ? LegacyLayout.decodeClustering(metadata, mark) ++ ? decodeClustering(metadata, mark) + : Clustering.serializer.deserialize(mark, MessagingService.VERSION_30, makeClusteringTypes(metadata)); + } + ++ // Old (pre-3.0) encoding of cells. We need that for the protocol v3 as that is how things where encoded ++ private static ByteBuffer encodeCellName(CFMetaData metadata, Clustering clustering, ByteBuffer columnName, ByteBuffer collectionElement) ++ { ++ boolean isStatic = clustering == Clustering.STATIC_CLUSTERING; ++ ++ if (!metadata.isCompound()) ++ { ++ if (isStatic) ++ return columnName; ++ ++ assert clustering.size() == 1 : "Expected clustering size to be 1, but was " + clustering.size(); ++ return clustering.get(0); ++ } ++ ++ // We use comparator.size() rather than clustering.size() because of static clusterings ++ int clusteringSize = metadata.comparator.size(); ++ int size = clusteringSize + (metadata.isDense() ? 0 : 1) + (collectionElement == null ? 0 : 1); ++ if (metadata.isSuper()) ++ size = clusteringSize + 1; ++ ByteBuffer[] values = new ByteBuffer[size]; ++ for (int i = 0; i < clusteringSize; i++) ++ { ++ if (isStatic) ++ { ++ values[i] = ByteBufferUtil.EMPTY_BYTE_BUFFER; ++ continue; ++ } ++ ++ ByteBuffer v = clustering.get(i); ++ // we can have null (only for dense compound tables for backward compatibility reasons) but that ++ // means we're done and should stop there as far as building the composite is concerned. ++ if (v == null) ++ return CompositeType.build(Arrays.copyOfRange(values, 0, i)); ++ ++ values[i] = v; ++ } ++ ++ if (metadata.isSuper()) ++ { ++ // We need to set the "column" (in thrift terms) name, i.e. the value corresponding to the subcomparator. 
++ // What it is depends if this a cell for a declared "static" column or a "dynamic" column part of the ++ // super-column internal map. ++ assert columnName != null; // This should never be null for supercolumns, see decodeForSuperColumn() above ++ values[clusteringSize] = columnName.equals(CompactTables.SUPER_COLUMN_MAP_COLUMN) ++ ? collectionElement ++ : columnName; ++ } ++ else ++ { ++ if (!metadata.isDense()) ++ values[clusteringSize] = columnName; ++ if (collectionElement != null) ++ values[clusteringSize + 1] = collectionElement; ++ } ++ ++ return CompositeType.build(isStatic, values); ++ } ++ ++ private static Clustering decodeClustering(CFMetaData metadata, ByteBuffer value) ++ { ++ int csize = metadata.comparator.size(); ++ if (csize == 0) ++ return Clustering.EMPTY; ++ ++ if (metadata.isCompound() && CompositeType.isStaticName(value)) ++ return Clustering.STATIC_CLUSTERING; ++ ++ List components = metadata.isCompound() ++ ? CompositeType.splitName(value) ++ : Collections.singletonList(value); ++ ++ return Clustering.make(components.subList(0, Math.min(csize, components.size())).toArray(new ByteBuffer[csize])); ++ } ++ + @Override + public final int hashCode() + { +diff --git a/src/java/org/apache/cassandra/service/pager/PartitionRangeQueryPager.java b/src/java/org/apache/cassandra/service/pager/PartitionRangeQueryPager.java +index 9c216e3..e9da2ae 100644 +--- a/src/java/org/apache/cassandra/service/pager/PartitionRangeQueryPager.java ++++ b/src/java/org/apache/cassandra/service/pager/PartitionRangeQueryPager.java +@@ -32,9 +32,6 @@ import org.apache.cassandra.schema.IndexMetadata; + + /** + * Pages a PartitionRangeReadCommand. +- * +- * Note: this only work for CQL3 queries for now (because thrift queries expect +- * a different limit on the rows than on the columns, which complicates it). 
+ */ + public class PartitionRangeQueryPager extends AbstractQueryPager + { +diff --git a/src/java/org/apache/cassandra/service/pager/QueryPagers.java b/src/java/org/apache/cassandra/service/pager/QueryPagers.java +deleted file mode 100644 +index 02b5de2..0000000 +--- a/src/java/org/apache/cassandra/service/pager/QueryPagers.java ++++ /dev/null +@@ -1,65 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. +- */ +-package org.apache.cassandra.service.pager; +- +-import org.apache.cassandra.config.CFMetaData; +-import org.apache.cassandra.db.*; +-import org.apache.cassandra.db.filter.*; +-import org.apache.cassandra.db.partitions.*; +-import org.apache.cassandra.exceptions.RequestExecutionException; +-import org.apache.cassandra.exceptions.RequestValidationException; +-import org.apache.cassandra.service.ClientState; +-import org.apache.cassandra.transport.Server; +- +-/** +- * Static utility methods for paging. +- */ +-public class QueryPagers +-{ +- private QueryPagers() {}; +- +- /** +- * Convenience method that count (live) cells/rows for a given slice of a row, but page underneath. 
+- */ +- public static int countPaged(CFMetaData metadata, +- DecoratedKey key, +- ColumnFilter columnFilter, +- ClusteringIndexFilter filter, +- DataLimits limits, +- ConsistencyLevel consistencyLevel, +- ClientState state, +- final int pageSize, +- int nowInSec, +- boolean isForThrift) throws RequestValidationException, RequestExecutionException +- { +- SinglePartitionReadCommand command = SinglePartitionReadCommand.create(isForThrift, metadata, nowInSec, columnFilter, RowFilter.NONE, limits, key, filter); +- final SinglePartitionPager pager = new SinglePartitionPager(command, null, Server.CURRENT_VERSION); +- +- int count = 0; +- while (!pager.isExhausted()) +- { +- try (PartitionIterator iter = pager.fetchPage(pageSize, consistencyLevel, state)) +- { +- DataLimits.Counter counter = limits.newCounter(nowInSec, true); +- PartitionIterators.consume(counter.applyTo(iter)); +- count += counter.counted(); +- } +- } +- return count; +- } +-} +diff --git a/src/java/org/apache/cassandra/service/pager/SinglePartitionPager.java b/src/java/org/apache/cassandra/service/pager/SinglePartitionPager.java +index acb55bb..2178b91 100644 +--- a/src/java/org/apache/cassandra/service/pager/SinglePartitionPager.java ++++ b/src/java/org/apache/cassandra/service/pager/SinglePartitionPager.java +@@ -71,8 +71,9 @@ public class SinglePartitionPager extends AbstractQueryPager + protected ReadCommand nextPageReadCommand(int pageSize) + { + Clustering clustering = lastReturned == null ? null : lastReturned.clustering(command.metadata()); +- DataLimits limits = (lastReturned == null || command.isForThrift()) ? limits().forPaging(pageSize) +- : limits().forPaging(pageSize, key(), remainingInPartition()); ++ DataLimits limits = lastReturned == null ++ ? 
limits().forPaging(pageSize) ++ : limits().forPaging(pageSize, key(), remainingInPartition()); + + return command.forPaging(clustering, limits); + } +diff --git a/src/java/org/apache/cassandra/thrift/CassandraServer.java b/src/java/org/apache/cassandra/thrift/CassandraServer.java +deleted file mode 100644 +index a189000..0000000 +--- a/src/java/org/apache/cassandra/thrift/CassandraServer.java ++++ /dev/null +@@ -1,2621 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. 
+- */ +-package org.apache.cassandra.thrift; +- +-import java.io.IOException; +-import java.nio.ByteBuffer; +-import java.nio.charset.CharacterCodingException; +-import java.nio.charset.StandardCharsets; +-import java.util.*; +-import java.util.concurrent.Callable; +-import java.util.concurrent.TimeoutException; +-import java.util.zip.DataFormatException; +-import java.util.zip.Inflater; +- +-import com.google.common.base.Joiner; +-import com.google.common.collect.*; +-import com.google.common.primitives.Longs; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-import org.apache.cassandra.auth.Permission; +-import org.apache.cassandra.config.*; +-import org.apache.cassandra.cql3.QueryOptions; +-import org.apache.cassandra.cql3.statements.ParsedStatement; +-import org.apache.cassandra.db.*; +-import org.apache.cassandra.db.context.CounterContext; +-import org.apache.cassandra.db.filter.*; +-import org.apache.cassandra.db.marshal.AbstractType; +-import org.apache.cassandra.db.marshal.TimeUUIDType; +-import org.apache.cassandra.db.partitions.*; +-import org.apache.cassandra.db.rows.*; +-import org.apache.cassandra.db.view.View; +-import org.apache.cassandra.dht.*; +-import org.apache.cassandra.dht.Range; +-import org.apache.cassandra.exceptions.*; +-import org.apache.cassandra.io.util.DataOutputBuffer; +-import org.apache.cassandra.locator.DynamicEndpointSnitch; +-import org.apache.cassandra.metrics.ClientMetrics; +-import org.apache.cassandra.scheduler.IRequestScheduler; +-import org.apache.cassandra.schema.KeyspaceMetadata; +-import org.apache.cassandra.serializers.MarshalException; +-import org.apache.cassandra.service.*; +-import org.apache.cassandra.service.pager.QueryPagers; +-import org.apache.cassandra.tracing.Tracing; +-import org.apache.cassandra.utils.*; +-import org.apache.cassandra.utils.btree.BTreeSet; +-import org.apache.thrift.TException; +- +-public class CassandraServer implements Cassandra.Iface +-{ +- private static final Logger 
logger = LoggerFactory.getLogger(CassandraServer.class); +- +- private final static int COUNT_PAGE_SIZE = 1024; +- +- private final static List EMPTY_COLUMNS = Collections.emptyList(); +- +- /* +- * RequestScheduler to perform the scheduling of incoming requests +- */ +- private final IRequestScheduler requestScheduler; +- +- public CassandraServer() +- { +- requestScheduler = DatabaseDescriptor.getRequestScheduler(); +- registerMetrics(); +- } +- +- public ThriftClientState state() +- { +- return ThriftSessionManager.instance.currentSession(); +- } +- +- protected PartitionIterator read(List commands, org.apache.cassandra.db.ConsistencyLevel consistency_level, ClientState cState) +- throws org.apache.cassandra.exceptions.InvalidRequestException, UnavailableException, TimedOutException +- { +- try +- { +- schedule(DatabaseDescriptor.getReadRpcTimeout()); +- try +- { +- return StorageProxy.read(new SinglePartitionReadCommand.Group(commands, DataLimits.NONE), consistency_level, cState); +- } +- finally +- { +- release(); +- } +- } +- catch (RequestExecutionException e) +- { +- throw ThriftConversion.rethrow(e); +- } +- } +- +- public List thriftifyColumns(CFMetaData metadata, Iterator cells) +- { +- ArrayList thriftColumns = new ArrayList<>(); +- while (cells.hasNext()) +- { +- LegacyLayout.LegacyCell cell = cells.next(); +- thriftColumns.add(thriftifyColumnWithName(metadata, cell, cell.name.encode(metadata))); +- } +- return thriftColumns; +- } +- +- private ColumnOrSuperColumn thriftifyColumnWithName(CFMetaData metadata, LegacyLayout.LegacyCell cell, ByteBuffer newName) +- { +- if (cell.isCounter()) +- return new ColumnOrSuperColumn().setCounter_column(thriftifySubCounter(metadata, cell).setName(newName)); +- else +- return new ColumnOrSuperColumn().setColumn(thriftifySubColumn(cell, newName)); +- } +- +- private Column thriftifySubColumn(CFMetaData metadata, LegacyLayout.LegacyCell cell) +- { +- return thriftifySubColumn(cell, cell.name.encode(metadata)); +- } +- 
+- private Column thriftifySubColumn(LegacyLayout.LegacyCell cell, ByteBuffer name) +- { +- assert !cell.isCounter(); +- +- Column thrift_column = new Column(name).setValue(cell.value).setTimestamp(cell.timestamp); +- if (cell.isExpiring()) +- thrift_column.setTtl(cell.ttl); +- return thrift_column; +- } +- +- private List thriftifyColumnsAsColumns(CFMetaData metadata, Iterator cells) +- { +- List thriftColumns = new ArrayList<>(); +- while (cells.hasNext()) +- thriftColumns.add(thriftifySubColumn(metadata, cells.next())); +- return thriftColumns; +- } +- +- private CounterColumn thriftifySubCounter(CFMetaData metadata, LegacyLayout.LegacyCell cell) +- { +- assert cell.isCounter(); +- return new CounterColumn(cell.name.encode(metadata), CounterContext.instance().total(cell.value)); +- } +- +- private List thriftifySuperColumns(CFMetaData metadata, +- Iterator cells, +- boolean subcolumnsOnly, +- boolean isCounterCF, +- boolean reversed) +- { +- if (subcolumnsOnly) +- { +- ArrayList thriftSuperColumns = new ArrayList<>(); +- while (cells.hasNext()) +- { +- LegacyLayout.LegacyCell cell = cells.next(); +- thriftSuperColumns.add(thriftifyColumnWithName(metadata, cell, cell.name.superColumnSubName())); +- } +- // Generally, cells come reversed if the query is reverse. However, this is not the case within a super column because +- // internally a super column is a map within a row and those are never returned reversed. 
+- if (reversed) +- Collections.reverse(thriftSuperColumns); +- return thriftSuperColumns; +- } +- else +- { +- if (isCounterCF) +- return thriftifyCounterSuperColumns(metadata, cells, reversed); +- else +- return thriftifySuperColumns(cells, reversed); +- } +- } +- +- private List thriftifySuperColumns(Iterator cells, boolean reversed) +- { +- ArrayList thriftSuperColumns = new ArrayList<>(); +- SuperColumn current = null; +- while (cells.hasNext()) +- { +- LegacyLayout.LegacyCell cell = cells.next(); +- ByteBuffer scName = cell.name.superColumnName(); +- if (current == null || !scName.equals(current.bufferForName())) +- { +- // Generally, cells come reversed if the query is reverse. However, this is not the case within a super column because +- // internally a super column is a map within a row and those are never returned reversed. +- if (current != null && reversed) +- Collections.reverse(current.columns); +- +- current = new SuperColumn(scName, new ArrayList<>()); +- thriftSuperColumns.add(new ColumnOrSuperColumn().setSuper_column(current)); +- } +- current.getColumns().add(thriftifySubColumn(cell, cell.name.superColumnSubName())); +- } +- +- if (current != null && reversed) +- Collections.reverse(current.columns); +- +- return thriftSuperColumns; +- } +- +- private List thriftifyCounterSuperColumns(CFMetaData metadata, Iterator cells, boolean reversed) +- { +- ArrayList thriftSuperColumns = new ArrayList<>(); +- CounterSuperColumn current = null; +- while (cells.hasNext()) +- { +- LegacyLayout.LegacyCell cell = cells.next(); +- ByteBuffer scName = cell.name.superColumnName(); +- if (current == null || !scName.equals(current.bufferForName())) +- { +- // Generally, cells come reversed if the query is reverse. However, this is not the case within a super column because +- // internally a super column is a map within a row and those are never returned reversed. 
+- if (current != null && reversed) +- Collections.reverse(current.columns); +- +- current = new CounterSuperColumn(scName, new ArrayList<>()); +- thriftSuperColumns.add(new ColumnOrSuperColumn().setCounter_super_column(current)); +- } +- current.getColumns().add(thriftifySubCounter(metadata, cell).setName(cell.name.superColumnSubName())); +- } +- return thriftSuperColumns; +- } +- +- private List thriftifyPartition(RowIterator partition, boolean subcolumnsOnly, boolean reversed, int cellLimit) +- { +- if (partition.isEmpty()) +- return EMPTY_COLUMNS; +- +- Iterator cells = LegacyLayout.fromRowIterator(partition).right; +- List result; +- if (partition.metadata().isSuper()) +- { +- boolean isCounterCF = partition.metadata().isCounter(); +- result = thriftifySuperColumns(partition.metadata(), cells, subcolumnsOnly, isCounterCF, reversed); +- } +- else +- { +- result = thriftifyColumns(partition.metadata(), cells); +- } +- +- // Thrift count cells, but internally we only count them at "row" boundaries, which means that if the limit stops in the middle +- // of an internal row we'll include a few additional cells. So trim it here. +- return result.size() > cellLimit +- ? 
result.subList(0, cellLimit) +- : result; +- } +- +- private Map> getSlice(List commands, boolean subColumnsOnly, int cellLimit, org.apache.cassandra.db.ConsistencyLevel consistency_level, ClientState cState) +- throws org.apache.cassandra.exceptions.InvalidRequestException, UnavailableException, TimedOutException +- { +- try (PartitionIterator results = read(commands, consistency_level, cState)) +- { +- Map> columnFamiliesMap = new HashMap<>(); +- while (results.hasNext()) +- { +- try (RowIterator iter = results.next()) +- { +- List thriftifiedColumns = thriftifyPartition(iter, subColumnsOnly, iter.isReverseOrder(), cellLimit); +- columnFamiliesMap.put(iter.partitionKey().getKey(), thriftifiedColumns); +- } +- } +- return columnFamiliesMap; +- } +- } +- +- public List get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level) +- throws InvalidRequestException, UnavailableException, TimedOutException +- { +- if (startSessionIfRequested()) +- { +- Map traceParameters = ImmutableMap.of("key", ByteBufferUtil.bytesToHex(key), +- "column_parent", column_parent.toString(), +- "predicate", predicate.toString(), +- "consistency_level", consistency_level.name()); +- Tracing.instance.begin("get_slice", traceParameters); +- } +- else +- { +- logger.trace("get_slice"); +- } +- +- try +- { +- ClientState cState = state(); +- String keyspace = cState.getKeyspace(); +- state().hasColumnFamilyAccess(keyspace, column_parent.column_family, Permission.SELECT); +- List result = getSliceInternal(keyspace, key, column_parent, FBUtilities.nowInSeconds(), predicate, consistency_level, cState); +- return result == null ? 
Collections.emptyList() : result; +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- finally +- { +- Tracing.instance.stopSession(); +- } +- } +- +- private List getSliceInternal(String keyspace, +- ByteBuffer key, +- ColumnParent column_parent, +- int nowInSec, +- SlicePredicate predicate, +- ConsistencyLevel consistency_level, +- ClientState cState) +- throws org.apache.cassandra.exceptions.InvalidRequestException, UnavailableException, TimedOutException +- { +- return multigetSliceInternal(keyspace, Collections.singletonList(key), column_parent, nowInSec, predicate, consistency_level, cState).get(key); +- } +- +- public Map> multiget_slice(List keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level) +- throws InvalidRequestException, UnavailableException, TimedOutException +- { +- if (startSessionIfRequested()) +- { +- List keysList = Lists.newArrayList(); +- for (ByteBuffer key : keys) +- keysList.add(ByteBufferUtil.bytesToHex(key)); +- Map traceParameters = ImmutableMap.of("keys", keysList.toString(), +- "column_parent", column_parent.toString(), +- "predicate", predicate.toString(), +- "consistency_level", consistency_level.name()); +- Tracing.instance.begin("multiget_slice", traceParameters); +- } +- else +- { +- logger.trace("multiget_slice"); +- } +- +- try +- { +- ClientState cState = state(); +- String keyspace = cState.getKeyspace(); +- cState.hasColumnFamilyAccess(keyspace, column_parent.column_family, Permission.SELECT); +- return multigetSliceInternal(keyspace, keys, column_parent, FBUtilities.nowInSeconds(), predicate, consistency_level, cState); +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- finally +- { +- Tracing.instance.stopSession(); +- } +- } +- +- private ClusteringIndexFilter toInternalFilter(CFMetaData metadata, ColumnParent parent, SliceRange range) +- { +- if (metadata.isSuper() && 
parent.isSetSuper_column()) +- return new ClusteringIndexNamesFilter(FBUtilities.singleton(Clustering.make(parent.bufferForSuper_column()), metadata.comparator), range.reversed); +- else +- return new ClusteringIndexSliceFilter(makeSlices(metadata, range), range.reversed); +- } +- +- private Slices makeSlices(CFMetaData metadata, SliceRange range) +- { +- // Note that in thrift, the bounds are reversed if the query is reversed, but not internally. +- ByteBuffer start = range.reversed ? range.finish : range.start; +- ByteBuffer finish = range.reversed ? range.start : range.finish; +- return Slices.with(metadata.comparator, Slice.make(LegacyLayout.decodeBound(metadata, start, true).bound, LegacyLayout.decodeBound(metadata, finish, false).bound)); +- } +- +- private ClusteringIndexFilter toInternalFilter(CFMetaData metadata, ColumnParent parent, SlicePredicate predicate) +- throws org.apache.cassandra.exceptions.InvalidRequestException +- { +- try +- { +- if (predicate.column_names != null) +- { +- if (metadata.isSuper()) +- { +- if (parent.isSetSuper_column()) +- { +- return new ClusteringIndexNamesFilter(FBUtilities.singleton(Clustering.make(parent.bufferForSuper_column()), metadata.comparator), false); +- } +- else +- { +- NavigableSet clusterings = new TreeSet<>(metadata.comparator); +- for (ByteBuffer bb : predicate.column_names) +- clusterings.add(Clustering.make(bb)); +- return new ClusteringIndexNamesFilter(clusterings, false); +- } +- } +- else +- { +- NavigableSet clusterings = new TreeSet<>(metadata.comparator); +- for (ByteBuffer bb : predicate.column_names) +- { +- LegacyLayout.LegacyCellName name = LegacyLayout.decodeCellName(metadata, parent.bufferForSuper_column(), bb); +- +- if (!name.clustering.equals(Clustering.STATIC_CLUSTERING)) +- clusterings.add(name.clustering); +- } +- +- // clusterings cannot include STATIC_CLUSTERING, so if the names filter is for static columns, clusterings +- // will be empty. 
However, by requesting the static columns in our ColumnFilter, this will still work. +- return new ClusteringIndexNamesFilter(clusterings, false); +- } +- } +- else +- { +- return toInternalFilter(metadata, parent, predicate.slice_range); +- } +- } +- catch (UnknownColumnException e) +- { +- throw new org.apache.cassandra.exceptions.InvalidRequestException(e.getMessage()); +- } +- } +- +- private ColumnFilter makeColumnFilter(CFMetaData metadata, ColumnParent parent, SliceRange range) +- { +- if (metadata.isSuper() && parent.isSetSuper_column()) +- { +- // We want a slice of the dynamic columns +- ColumnFilter.Builder builder = ColumnFilter.selectionBuilder(); +- ColumnDefinition def = metadata.compactValueColumn(); +- ByteBuffer start = range.reversed ? range.finish : range.start; +- ByteBuffer finish = range.reversed ? range.start : range.finish; +- builder.slice(def, start.hasRemaining() ? CellPath.create(start) : CellPath.BOTTOM, finish.hasRemaining() ? CellPath.create(finish) : CellPath.TOP); +- +- // We also want to add any staticly defined column if it's within the range +- AbstractType cmp = metadata.thriftColumnNameType(); +- for (ColumnDefinition column : metadata.partitionColumns()) +- { +- if (CompactTables.isSuperColumnMapColumn(column)) +- continue; +- +- ByteBuffer name = column.name.bytes; +- if (cmp.compare(name, start) < 0 || cmp.compare(finish, name) > 0) +- continue; +- +- builder.add(column); +- } +- return builder.build(); +- } +- return makeColumnFilter(metadata, makeSlices(metadata, range)); +- } +- +- private ColumnFilter makeColumnFilter(CFMetaData metadata, Slices slices) +- { +- PartitionColumns columns = metadata.partitionColumns(); +- if (metadata.isStaticCompactTable() && !columns.statics.isEmpty()) +- { +- PartitionColumns.Builder builder = PartitionColumns.builder(); +- builder.addAll(columns.regulars); +- // We only want to include the static columns that are selected by the slices +- for (ColumnDefinition def : columns.statics) +- 
{ +- if (slices.selects(Clustering.make(def.name.bytes))) +- builder.add(def); +- } +- columns = builder.build(); +- } +- return ColumnFilter.selection(columns); +- } +- +- private ColumnFilter makeColumnFilter(CFMetaData metadata, ColumnParent parent, SlicePredicate predicate) +- throws org.apache.cassandra.exceptions.InvalidRequestException +- { +- try +- { +- if (predicate.column_names != null) +- { +- if (metadata.isSuper()) +- { +- if (parent.isSetSuper_column()) +- { +- ColumnFilter.Builder builder = ColumnFilter.selectionBuilder(); +- ColumnDefinition dynamicDef = metadata.compactValueColumn(); +- for (ByteBuffer bb : predicate.column_names) +- { +- ColumnDefinition staticDef = metadata.getColumnDefinition(bb); +- if (staticDef == null) +- builder.select(dynamicDef, CellPath.create(bb)); +- else +- builder.add(staticDef); +- } +- return builder.build(); +- } +- else +- { +- return ColumnFilter.all(metadata); +- } +- } +- else +- { +- PartitionColumns.Builder builder = PartitionColumns.builder(); +- for (ByteBuffer bb : predicate.column_names) +- { +- LegacyLayout.LegacyCellName name = LegacyLayout.decodeCellName(metadata, parent.bufferForSuper_column(), bb); +- builder.add(name.column); +- } +- +- if (metadata.isStaticCompactTable()) +- builder.add(metadata.compactValueColumn()); +- +- return ColumnFilter.selection(builder.build()); +- } +- } +- else +- { +- return makeColumnFilter(metadata, parent, predicate.slice_range); +- } +- } +- catch (UnknownColumnException e) +- { +- throw new org.apache.cassandra.exceptions.InvalidRequestException(e.getMessage()); +- } +- } +- +- private DataLimits getLimits(int partitionLimit, boolean countSuperColumns, SlicePredicate predicate) +- { +- int cellsPerPartition = predicate.slice_range == null ? 
Integer.MAX_VALUE : predicate.slice_range.count; +- return getLimits(partitionLimit, countSuperColumns, cellsPerPartition); +- } +- +- private DataLimits getLimits(int partitionLimit, boolean countSuperColumns, int perPartitionCount) +- { +- return countSuperColumns +- ? DataLimits.superColumnCountingLimits(partitionLimit, perPartitionCount) +- : DataLimits.thriftLimits(partitionLimit, perPartitionCount); +- } +- +- private Map> multigetSliceInternal(String keyspace, +- List keys, +- ColumnParent column_parent, +- int nowInSec, +- SlicePredicate predicate, +- ConsistencyLevel consistency_level, +- ClientState cState) +- throws org.apache.cassandra.exceptions.InvalidRequestException, UnavailableException, TimedOutException +- { +- CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family); +- ThriftValidation.validateColumnParent(metadata, column_parent); +- ThriftValidation.validatePredicate(metadata, column_parent, predicate); +- +- org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(consistency_level); +- consistencyLevel.validateForRead(keyspace); +- +- List commands = new ArrayList<>(keys.size()); +- ColumnFilter columnFilter = makeColumnFilter(metadata, column_parent, predicate); +- ClusteringIndexFilter filter = toInternalFilter(metadata, column_parent, predicate); +- DataLimits limits = getLimits(1, metadata.isSuper() && !column_parent.isSetSuper_column(), predicate); +- +- for (ByteBuffer key: keys) +- { +- ThriftValidation.validateKey(metadata, key); +- DecoratedKey dk = metadata.decorateKey(key); +- commands.add(SinglePartitionReadCommand.create(true, metadata, nowInSec, columnFilter, RowFilter.NONE, limits, dk, filter)); +- } +- +- return getSlice(commands, column_parent.isSetSuper_column(), limits.perPartitionCount(), consistencyLevel, cState); +- } +- +- public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level) +- throws 
InvalidRequestException, NotFoundException, UnavailableException, TimedOutException +- { +- if (startSessionIfRequested()) +- { +- Map traceParameters = ImmutableMap.of("key", ByteBufferUtil.bytesToHex(key), +- "column_path", column_path.toString(), +- "consistency_level", consistency_level.name()); +- Tracing.instance.begin("get", traceParameters); +- } +- else +- { +- logger.trace("get"); +- } +- +- try +- { +- ThriftClientState cState = state(); +- String keyspace = cState.getKeyspace(); +- cState.hasColumnFamilyAccess(keyspace, column_path.column_family, Permission.SELECT); +- +- CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_path.column_family); +- ThriftValidation.validateColumnPath(metadata, column_path); +- org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(consistency_level); +- consistencyLevel.validateForRead(keyspace); +- +- ThriftValidation.validateKey(metadata, key); +- +- ColumnFilter columns; +- ClusteringIndexFilter filter; +- if (metadata.isSuper()) +- { +- if (column_path.column == null) +- { +- // Selects a full super column +- columns = ColumnFilter.all(metadata); +- } +- else +- { +- // Selects a single column within a super column +- ColumnFilter.Builder builder = ColumnFilter.selectionBuilder(); +- ColumnDefinition staticDef = metadata.getColumnDefinition(column_path.column); +- ColumnDefinition dynamicDef = metadata.compactValueColumn(); +- +- if (staticDef != null) +- builder.add(staticDef); +- // Note that even if there is a staticDef, we still query the dynamicDef since we can't guarantee the static one hasn't +- // been created after data has been inserted for that definition +- builder.select(dynamicDef, CellPath.create(column_path.column)); +- columns = builder.build(); +- } +- filter = new ClusteringIndexNamesFilter(FBUtilities.singleton(Clustering.make(column_path.super_column), metadata.comparator), +- false); +- } +- else +- { +- LegacyLayout.LegacyCellName 
cellname = LegacyLayout.decodeCellName(metadata, column_path.super_column, column_path.column); +- if (cellname.clustering == Clustering.STATIC_CLUSTERING) +- { +- // Same as above: even if we're querying a static column, we still query the equivalent dynamic column and value as some +- // values might have been created post creation of the column (ThriftResultMerger then ensures we get only one result). +- ColumnFilter.Builder builder = ColumnFilter.selectionBuilder(); +- builder.add(cellname.column); +- builder.add(metadata.compactValueColumn()); +- columns = builder.build(); +- filter = new ClusteringIndexNamesFilter(FBUtilities.singleton(Clustering.make(column_path.column), metadata.comparator), false); +- } +- else +- { +- columns = ColumnFilter.selection(PartitionColumns.of(cellname.column)); +- filter = new ClusteringIndexNamesFilter(FBUtilities.singleton(cellname.clustering, metadata.comparator), false); +- } +- } +- +- DecoratedKey dk = metadata.decorateKey(key); +- SinglePartitionReadCommand command = SinglePartitionReadCommand.create(true, metadata, FBUtilities.nowInSeconds(), columns, RowFilter.NONE, DataLimits.NONE, dk, filter); +- +- try (RowIterator result = PartitionIterators.getOnlyElement(read(Arrays.asList(command), consistencyLevel, cState), command)) +- { +- if (!result.hasNext()) +- throw new NotFoundException(); +- +- List tcolumns = thriftifyPartition(result, metadata.isSuper() && column_path.column != null, result.isReverseOrder(), 1); +- if (tcolumns.isEmpty()) +- throw new NotFoundException(); +- assert tcolumns.size() == 1; +- return tcolumns.get(0); +- } +- } +- catch (UnknownColumnException e) +- { +- throw new InvalidRequestException(e.getMessage()); +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- finally +- { +- Tracing.instance.stopSession(); +- } +- } +- +- public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level) 
+- throws InvalidRequestException, UnavailableException, TimedOutException +- { +- if (startSessionIfRequested()) +- { +- Map traceParameters = ImmutableMap.of("key", ByteBufferUtil.bytesToHex(key), +- "column_parent", column_parent.toString(), +- "predicate", predicate.toString(), +- "consistency_level", consistency_level.name()); +- Tracing.instance.begin("get_count", traceParameters); +- } +- else +- { +- logger.trace("get_count"); +- } +- +- try +- { +- ThriftClientState cState = state(); +- String keyspace = cState.getKeyspace(); +- cState.hasColumnFamilyAccess(keyspace, column_parent.column_family, Permission.SELECT); +- Keyspace keyspaceName = Keyspace.open(keyspace); +- ColumnFamilyStore cfs = keyspaceName.getColumnFamilyStore(column_parent.column_family); +- int nowInSec = FBUtilities.nowInSeconds(); +- +- if (predicate.column_names != null) +- return getSliceInternal(keyspace, key, column_parent, nowInSec, predicate, consistency_level, cState).size(); +- +- int pageSize; +- // request by page if this is a large row +- if (cfs.getMeanColumns() > 0) +- { +- int averageColumnSize = (int) (cfs.metric.meanPartitionSize.getValue() / cfs.getMeanColumns()); +- pageSize = Math.min(COUNT_PAGE_SIZE, 4 * 1024 * 1024 / averageColumnSize); +- pageSize = Math.max(2, pageSize); +- logger.trace("average row column size is {}; using pageSize of {}", averageColumnSize, pageSize); +- } +- else +- { +- pageSize = COUNT_PAGE_SIZE; +- } +- +- SliceRange sliceRange = predicate.slice_range == null +- ? 
new SliceRange(ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, Integer.MAX_VALUE) +- : predicate.slice_range; +- +- ColumnFilter columnFilter; +- ClusteringIndexFilter filter; +- CFMetaData metadata = cfs.metadata; +- if (metadata.isSuper() && !column_parent.isSetSuper_column()) +- { +- // If we count on a super column table without having set the super column name, we're in fact interested by the count of super columns +- columnFilter = ColumnFilter.all(metadata); +- filter = new ClusteringIndexSliceFilter(makeSlices(metadata, sliceRange), sliceRange.reversed); +- } +- else +- { +- columnFilter = makeColumnFilter(metadata, column_parent, sliceRange); +- filter = toInternalFilter(metadata, column_parent, sliceRange); +- } +- +- DataLimits limits = getLimits(1, metadata.isSuper() && !column_parent.isSetSuper_column(), predicate); +- DecoratedKey dk = metadata.decorateKey(key); +- +- return QueryPagers.countPaged(metadata, +- dk, +- columnFilter, +- filter, +- limits, +- ThriftConversion.fromThrift(consistency_level), +- cState, +- pageSize, +- nowInSec, +- true); +- } +- catch (IllegalArgumentException e) +- { +- // CASSANDRA-5701 +- throw new InvalidRequestException(e.getMessage()); +- } +- catch (RequestExecutionException e) +- { +- throw ThriftConversion.rethrow(e); +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- finally +- { +- Tracing.instance.stopSession(); +- } +- } +- +- public Map multiget_count(List keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level) +- throws InvalidRequestException, UnavailableException, TimedOutException +- { +- if (startSessionIfRequested()) +- { +- List keysList = Lists.newArrayList(); +- for (ByteBuffer key : keys) +- { +- keysList.add(ByteBufferUtil.bytesToHex(key)); +- } +- Map traceParameters = ImmutableMap.of("keys", keysList.toString(), +- "column_parent", column_parent.toString(), +- "predicate", 
predicate.toString(), +- "consistency_level", consistency_level.name()); +- Tracing.instance.begin("multiget_count", traceParameters); +- } +- else +- { +- logger.trace("multiget_count"); +- } +- +- try +- { +- ThriftClientState cState = state(); +- String keyspace = cState.getKeyspace(); +- cState.hasColumnFamilyAccess(keyspace, column_parent.column_family, Permission.SELECT); +- +- Map counts = new HashMap<>(); +- Map> columnFamiliesMap = multigetSliceInternal(keyspace, +- keys, +- column_parent, +- FBUtilities.nowInSeconds(), +- predicate, +- consistency_level, +- cState); +- +- for (Map.Entry> cf : columnFamiliesMap.entrySet()) +- counts.put(cf.getKey(), cf.getValue().size()); +- return counts; +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- finally +- { +- Tracing.instance.stopSession(); +- } +- } +- +- private Cell cellFromColumn(CFMetaData metadata, LegacyLayout.LegacyCellName name, Column column) +- { +- CellPath path = name.collectionElement == null ? null : CellPath.create(name.collectionElement); +- int ttl = getTtl(metadata, column); +- return ttl == LivenessInfo.NO_TTL +- ? 
BufferCell.live(name.column, column.timestamp, column.value, path) +- : BufferCell.expiring(name.column, column.timestamp, ttl, FBUtilities.nowInSeconds(), column.value, path); +- } +- +- private int getTtl(CFMetaData metadata,Column column) +- { +- if (!column.isSetTtl()) +- return metadata.params.defaultTimeToLive; +- +- if (column.ttl == LivenessInfo.NO_TTL && metadata.params.defaultTimeToLive != LivenessInfo.NO_TTL) +- return LivenessInfo.NO_TTL; +- +- return column.ttl; +- } +- +- private void internal_insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level) +- throws RequestValidationException, UnavailableException, TimedOutException +- { +- ThriftClientState cState = state(); +- String keyspace = cState.getKeyspace(); +- cState.hasColumnFamilyAccess(keyspace, column_parent.column_family, Permission.MODIFY); +- +- CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, false); +- if (metadata.isView()) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("Cannot modify Materialized Views directly"); +- +- ThriftValidation.validateKey(metadata, key); +- ThriftValidation.validateColumnParent(metadata, column_parent); +- // SuperColumn field is usually optional, but not when we're inserting +- if (metadata.isSuper() && column_parent.super_column == null) +- { +- throw new org.apache.cassandra.exceptions.InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family); +- } +- ThriftValidation.validateColumnNames(metadata, column_parent, Collections.singletonList(column.name)); +- ThriftValidation.validateColumnData(metadata, column_parent.super_column, column); +- +- org.apache.cassandra.db.Mutation mutation; +- try +- { +- LegacyLayout.LegacyCellName name = LegacyLayout.decodeCellName(metadata, column_parent.super_column, column.name); +- Cell cell = cellFromColumn(metadata, name, column); +- PartitionUpdate update = 
PartitionUpdate.singleRowUpdate(metadata, key, BTreeRow.singleCellRow(name.clustering, cell)); +- +- // Indexed column values cannot be larger than 64K. See CASSANDRA-3057/4240 for more details +- Keyspace.open(metadata.ksName).getColumnFamilyStore(metadata.cfName).indexManager.validate(update); +- +- mutation = new org.apache.cassandra.db.Mutation(update); +- } +- catch (MarshalException|UnknownColumnException e) +- { +- throw new org.apache.cassandra.exceptions.InvalidRequestException(e.getMessage()); +- } +- doInsert(consistency_level, Collections.singletonList(mutation)); +- } +- +- public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level) +- throws InvalidRequestException, UnavailableException, TimedOutException +- { +- if (startSessionIfRequested()) +- { +- Map traceParameters = ImmutableMap.of("key", ByteBufferUtil.bytesToHex(key), +- "column_parent", column_parent.toString(), +- "column", column.toString(), +- "consistency_level", consistency_level.name()); +- Tracing.instance.begin("insert", traceParameters); +- } +- else +- { +- logger.trace("insert"); +- } +- +- try +- { +- internal_insert(key, column_parent, column, consistency_level); +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- finally +- { +- Tracing.instance.stopSession(); +- } +- } +- +- public CASResult cas(ByteBuffer key, +- String column_family, +- List expected, +- List updates, +- ConsistencyLevel serial_consistency_level, +- ConsistencyLevel commit_consistency_level) +- throws InvalidRequestException, UnavailableException, TimedOutException +- { +- if (startSessionIfRequested()) +- { +- ImmutableMap.Builder builder = ImmutableMap.builder(); +- builder.put("key", ByteBufferUtil.bytesToHex(key)); +- builder.put("column_family", column_family); +- builder.put("old", expected.toString()); +- builder.put("updates", updates.toString()); +- builder.put("consistency_level", 
commit_consistency_level.name()); +- builder.put("serial_consistency_level", serial_consistency_level.name()); +- Map traceParameters = builder.build(); +- +- Tracing.instance.begin("cas", traceParameters); +- } +- else +- { +- logger.trace("cas"); +- } +- +- try +- { +- ThriftClientState cState = state(); +- String keyspace = cState.getKeyspace(); +- cState.hasColumnFamilyAccess(keyspace, column_family, Permission.MODIFY); +- // CAS updates can be used to simulate a get request, so should require Permission.SELECT. +- cState.hasColumnFamilyAccess(keyspace, column_family, Permission.SELECT); +- +- CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_family, false); +- if (metadata.isView()) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("Cannot modify Materialized Views directly"); +- +- ThriftValidation.validateKey(metadata, key); +- if (metadata.isSuper()) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("CAS does not support supercolumns"); +- +- Iterable names = Iterables.transform(updates, column -> column.name); +- ThriftValidation.validateColumnNames(metadata, new ColumnParent(column_family), names); +- for (Column column : updates) +- ThriftValidation.validateColumnData(metadata, null, column); +- +- DecoratedKey dk = metadata.decorateKey(key); +- int nowInSec = FBUtilities.nowInSeconds(); +- +- PartitionUpdate partitionUpdates = PartitionUpdate.fromIterator(LegacyLayout.toRowIterator(metadata, dk, toLegacyCells(metadata, updates, nowInSec).iterator(), nowInSec), ColumnFilter.all(metadata)); +- // Indexed column values cannot be larger than 64K. 
See CASSANDRA-3057/4240 for more details +- Keyspace.open(metadata.ksName).getColumnFamilyStore(metadata.cfName).indexManager.validate(partitionUpdates); +- +- schedule(DatabaseDescriptor.getWriteRpcTimeout()); +- try (RowIterator result = StorageProxy.cas(cState.getKeyspace(), +- column_family, +- dk, +- new ThriftCASRequest(toLegacyCells(metadata, expected, nowInSec), partitionUpdates, nowInSec), +- ThriftConversion.fromThrift(serial_consistency_level), +- ThriftConversion.fromThrift(commit_consistency_level), +- cState)) +- { +- return result == null +- ? new CASResult(true) +- : new CASResult(false).setCurrent_values(thriftifyColumnsAsColumns(metadata, LegacyLayout.fromRowIterator(result).right)); +- } +- } +- catch (UnknownColumnException e) +- { +- throw new InvalidRequestException(e.getMessage()); +- } +- catch (RequestTimeoutException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- catch (RequestExecutionException e) +- { +- throw ThriftConversion.rethrow(e); +- } +- finally +- { +- Tracing.instance.stopSession(); +- } +- } +- +- private LegacyLayout.LegacyCell toLegacyCell(CFMetaData metadata, Column column, int nowInSec) throws UnknownColumnException +- { +- return toLegacyCell(metadata, null, column, nowInSec); +- } +- +- private LegacyLayout.LegacyCell toLegacyCell(CFMetaData metadata, ByteBuffer superColumnName, Column column, int nowInSec) +- throws UnknownColumnException +- { +- return column.ttl > 0 +- ? 
LegacyLayout.LegacyCell.expiring(metadata, superColumnName, column.name, column.value, column.timestamp, column.ttl, nowInSec) +- : LegacyLayout.LegacyCell.regular(metadata, superColumnName, column.name, column.value, column.timestamp); +- } +- +- private LegacyLayout.LegacyCell toLegacyDeletion(CFMetaData metadata, ByteBuffer name, long timestamp, int nowInSec) +- throws UnknownColumnException +- { +- return toLegacyDeletion(metadata, null, name, timestamp, nowInSec); +- } +- +- private LegacyLayout.LegacyCell toLegacyDeletion(CFMetaData metadata, ByteBuffer superColumnName, ByteBuffer name, long timestamp, int nowInSec) +- throws UnknownColumnException +- { +- return LegacyLayout.LegacyCell.tombstone(metadata, superColumnName, name, timestamp, nowInSec); +- } +- +- private LegacyLayout.LegacyCell toCounterLegacyCell(CFMetaData metadata, CounterColumn column) +- throws UnknownColumnException +- { +- return toCounterLegacyCell(metadata, null, column); +- } +- +- private LegacyLayout.LegacyCell toCounterLegacyCell(CFMetaData metadata, ByteBuffer superColumnName, CounterColumn column) +- throws UnknownColumnException +- { +- return LegacyLayout.LegacyCell.counter(metadata, superColumnName, column.name, column.value); +- } +- +- private void sortAndMerge(CFMetaData metadata, List cells, int nowInSec) +- { +- Collections.sort(cells, LegacyLayout.legacyCellComparator(metadata)); +- +- // After sorting, if we have multiple cells for the same "cellname", we want to merge those together. 
+- Comparator comparator = LegacyLayout.legacyCellNameComparator(metadata, false); +- +- int previous = 0; // The last element that was set +- for (int current = 1; current < cells.size(); current++) +- { +- LegacyLayout.LegacyCell pc = cells.get(previous); +- LegacyLayout.LegacyCell cc = cells.get(current); +- +- // There is really only 2 possible comparison: < 0 or == 0 since we've sorted already +- int cmp = comparator.compare(pc.name, cc.name); +- if (cmp == 0) +- { +- // current and previous are the same cell. Merge current into previous +- // (and so previous + 1 will be "free"). +- Conflicts.Resolution res; +- if (metadata.isCounter()) +- { +- res = Conflicts.resolveCounter(pc.timestamp, pc.isLive(nowInSec), pc.value, +- cc.timestamp, cc.isLive(nowInSec), cc.value); +- +- } +- else +- { +- res = Conflicts.resolveRegular(pc.timestamp, pc.isLive(nowInSec), pc.localDeletionTime, pc.value, +- cc.timestamp, cc.isLive(nowInSec), cc.localDeletionTime, cc.value); +- } +- +- switch (res) +- { +- case LEFT_WINS: +- // The previous cell wins, we'll just ignore current +- break; +- case RIGHT_WINS: +- cells.set(previous, cc); +- break; +- case MERGE: +- assert metadata.isCounter(); +- ByteBuffer merged = Conflicts.mergeCounterValues(pc.value, cc.value); +- cells.set(previous, LegacyLayout.LegacyCell.counter(pc.name, merged)); +- break; +- } +- } +- else +- { +- // cell.get(previous) < cells.get(current), so move current just after previous if needs be +- ++previous; +- if (previous != current) +- cells.set(previous, cc); +- } +- } +- +- // The last element we want is previous, so trim anything after that +- for (int i = cells.size() - 1; i > previous; i--) +- cells.remove(i); +- } +- +- private List toLegacyCells(CFMetaData metadata, List columns, int nowInSec) +- throws UnknownColumnException +- { +- List cells = new ArrayList<>(columns.size()); +- for (Column column : columns) +- cells.add(toLegacyCell(metadata, column, nowInSec)); +- +- sortAndMerge(metadata, cells, 
nowInSec); +- return cells; +- } +- +- private List createMutationList(ConsistencyLevel consistency_level, +- Map>> mutation_map, +- boolean allowCounterMutations) +- throws RequestValidationException, InvalidRequestException +- { +- List mutations = new ArrayList<>(); +- ThriftClientState cState = state(); +- String keyspace = cState.getKeyspace(); +- int nowInSec = FBUtilities.nowInSeconds(); +- +- for (Map.Entry>> mutationEntry: mutation_map.entrySet()) +- { +- ByteBuffer key = mutationEntry.getKey(); +- +- // We need to separate mutation for standard cf and counter cf (that will be encapsulated in a +- // CounterMutation) because it doesn't follow the same code path +- org.apache.cassandra.db.Mutation standardMutation = null; +- org.apache.cassandra.db.Mutation counterMutation = null; +- +- Map> columnFamilyToMutations = mutationEntry.getValue(); +- for (Map.Entry> columnFamilyMutations : columnFamilyToMutations.entrySet()) +- { +- String cfName = columnFamilyMutations.getKey(); +- List muts = columnFamilyMutations.getValue(); +- +- cState.hasColumnFamilyAccess(keyspace, cfName, Permission.MODIFY); +- +- CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, cfName); +- if (metadata.isView()) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("Cannot modify Materialized Views directly"); +- +- ThriftValidation.validateKey(metadata, key); +- if (metadata.isCounter()) +- ThriftConversion.fromThrift(consistency_level).validateCounterForWrite(metadata); +- +- LegacyLayout.LegacyDeletionInfo delInfo = LegacyLayout.LegacyDeletionInfo.live(); +- List cells = new ArrayList<>(); +- for (Mutation m : muts) +- { +- ThriftValidation.validateMutation(metadata, m); +- +- if (m.deletion != null) +- { +- deleteColumnOrSuperColumn(delInfo, cells, metadata, m.deletion, nowInSec); +- } +- if (m.column_or_supercolumn != null) +- { +- addColumnOrSuperColumn(cells, metadata, m.column_or_supercolumn, nowInSec); +- } +- } +- +- sortAndMerge(metadata, 
cells, nowInSec); +- DecoratedKey dk = metadata.decorateKey(key); +- PartitionUpdate update = PartitionUpdate.fromIterator(LegacyLayout.toUnfilteredRowIterator(metadata, dk, delInfo, cells.iterator()), ColumnFilter.all(metadata)); +- +- // Indexed column values cannot be larger than 64K. See CASSANDRA-3057/4240 for more details +- Keyspace.open(metadata.ksName).getColumnFamilyStore(metadata.cfName).indexManager.validate(update); +- +- org.apache.cassandra.db.Mutation mutation; +- if (metadata.isCounter()) +- { +- counterMutation = counterMutation == null ? new org.apache.cassandra.db.Mutation(keyspace, dk) : counterMutation; +- mutation = counterMutation; +- } +- else +- { +- standardMutation = standardMutation == null ? new org.apache.cassandra.db.Mutation(keyspace, dk) : standardMutation; +- mutation = standardMutation; +- } +- mutation.add(update); +- } +- if (standardMutation != null && !standardMutation.isEmpty()) +- mutations.add(standardMutation); +- +- if (counterMutation != null && !counterMutation.isEmpty()) +- { +- if (allowCounterMutations) +- mutations.add(new CounterMutation(counterMutation, ThriftConversion.fromThrift(consistency_level))); +- else +- throw new org.apache.cassandra.exceptions.InvalidRequestException("Counter mutations are not allowed in atomic batches"); +- } +- } +- +- return mutations; +- } +- +- private void addColumnOrSuperColumn(List cells, CFMetaData cfm, ColumnOrSuperColumn cosc, int nowInSec) +- throws InvalidRequestException +- { +- try +- { +- if (cosc.super_column != null) +- { +- for (Column column : cosc.super_column.columns) +- cells.add(toLegacyCell(cfm, cosc.super_column.name, column, nowInSec)); +- } +- else if (cosc.column != null) +- { +- cells.add(toLegacyCell(cfm, cosc.column, nowInSec)); +- } +- else if (cosc.counter_super_column != null) +- { +- for (CounterColumn column : cosc.counter_super_column.columns) +- cells.add(toCounterLegacyCell(cfm, cosc.counter_super_column.name, column)); +- } +- else // 
cosc.counter_column != null +- { +- cells.add(toCounterLegacyCell(cfm, cosc.counter_column)); +- } +- } +- catch (UnknownColumnException e) +- { +- throw new InvalidRequestException(e.getMessage()); +- } +- } +- +- private void addRange(CFMetaData cfm, LegacyLayout.LegacyDeletionInfo delInfo, ClusteringBound start, ClusteringBound end, long timestamp, int nowInSec) +- { +- delInfo.add(cfm, new RangeTombstone(Slice.make(start, end), new DeletionTime(timestamp, nowInSec))); +- } +- +- private void deleteColumnOrSuperColumn(LegacyLayout.LegacyDeletionInfo delInfo, List cells, CFMetaData cfm, Deletion del, int nowInSec) +- throws InvalidRequestException +- { +- if (del.predicate != null && del.predicate.column_names != null) +- { +- for (ByteBuffer c : del.predicate.column_names) +- { +- try +- { +- if (del.super_column == null && cfm.isSuper()) +- addRange(cfm, delInfo, ClusteringBound.inclusiveStartOf(c), ClusteringBound.inclusiveEndOf(c), del.timestamp, nowInSec); +- else if (del.super_column != null) +- cells.add(toLegacyDeletion(cfm, del.super_column, c, del.timestamp, nowInSec)); +- else +- cells.add(toLegacyDeletion(cfm, c, del.timestamp, nowInSec)); +- } +- catch (UnknownColumnException e) +- { +- throw new InvalidRequestException(e.getMessage()); +- } +- } +- } +- else if (del.predicate != null && del.predicate.slice_range != null) +- { +- if (del.super_column == null) +- { +- LegacyLayout.LegacyBound start = LegacyLayout.decodeBound(cfm, del.predicate.getSlice_range().start, true); +- LegacyLayout.LegacyBound end = LegacyLayout.decodeBound(cfm, del.predicate.getSlice_range().finish, false); +- delInfo.add(cfm, new LegacyLayout.LegacyRangeTombstone(start, end, new DeletionTime(del.timestamp, nowInSec))); +- } +- else +- { +- // Since we use a map for subcolumns, we would need range tombstone for collections to support this. +- // And while we may want those some day, this require a bit of additional work. 
And since super columns +- // are basically deprecated since a long time, and range tombstone on them has been only very recently +- // added so that no thrift driver actually supports it to the best of my knowledge, it's likely ok to +- // discontinue support for this. If it turns out that this is blocking the update of someone, we can +- // decide then if we want to tackle the addition of range tombstone for collections then. +- throw new InvalidRequestException("Cannot delete a range of subcolumns in a super column"); +- } +- } +- else +- { +- if (del.super_column != null) +- addRange(cfm, delInfo, ClusteringBound.inclusiveStartOf(del.super_column), ClusteringBound.inclusiveEndOf(del.super_column), del.timestamp, nowInSec); +- else +- delInfo.add(new DeletionTime(del.timestamp, nowInSec)); +- } +- } +- +- public void batch_mutate(Map>> mutation_map, ConsistencyLevel consistency_level) +- throws InvalidRequestException, UnavailableException, TimedOutException +- { +- if (startSessionIfRequested()) +- { +- Map traceParameters = Maps.newLinkedHashMap(); +- for (Map.Entry>> mutationEntry : mutation_map.entrySet()) +- { +- traceParameters.put(ByteBufferUtil.bytesToHex(mutationEntry.getKey()), +- Joiner.on(";").withKeyValueSeparator(":").join(mutationEntry.getValue())); +- } +- traceParameters.put("consistency_level", consistency_level.name()); +- Tracing.instance.begin("batch_mutate", traceParameters); +- } +- else +- { +- logger.trace("batch_mutate"); +- } +- +- try +- { +- doInsert(consistency_level, createMutationList(consistency_level, mutation_map, true)); +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- finally +- { +- Tracing.instance.stopSession(); +- } +- } +- +- public void atomic_batch_mutate(Map>> mutation_map, ConsistencyLevel consistency_level) +- throws InvalidRequestException, UnavailableException, TimedOutException +- { +- if (startSessionIfRequested()) +- { +- Map traceParameters = 
Maps.newLinkedHashMap(); +- for (Map.Entry>> mutationEntry : mutation_map.entrySet()) +- { +- traceParameters.put(ByteBufferUtil.bytesToHex(mutationEntry.getKey()), +- Joiner.on(";").withKeyValueSeparator(":").join(mutationEntry.getValue())); +- } +- traceParameters.put("consistency_level", consistency_level.name()); +- Tracing.instance.begin("atomic_batch_mutate", traceParameters); +- } +- else +- { +- logger.trace("atomic_batch_mutate"); +- } +- +- try +- { +- doInsert(consistency_level, createMutationList(consistency_level, mutation_map, false), true); +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- finally +- { +- Tracing.instance.stopSession(); +- } +- } +- +- private void internal_remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level, boolean isCommutativeOp) +- throws RequestValidationException, UnavailableException, TimedOutException +- { +- ThriftClientState cState = state(); +- String keyspace = cState.getKeyspace(); +- cState.hasColumnFamilyAccess(keyspace, column_path.column_family, Permission.MODIFY); +- +- CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_path.column_family, isCommutativeOp); +- if (metadata.isView()) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("Cannot modify Materialized Views directly"); +- +- ThriftValidation.validateKey(metadata, key); +- ThriftValidation.validateColumnPathOrParent(metadata, column_path); +- if (isCommutativeOp) +- ThriftConversion.fromThrift(consistency_level).validateCounterForWrite(metadata); +- +- DecoratedKey dk = metadata.decorateKey(key); +- +- int nowInSec = FBUtilities.nowInSeconds(); +- PartitionUpdate update; +- if (column_path.super_column == null && column_path.column == null) +- { +- update = PartitionUpdate.fullPartitionDelete(metadata, dk, timestamp, nowInSec); +- } +- else if (column_path.super_column != null && column_path.column == null) +- { +- Row row 
= BTreeRow.emptyDeletedRow(Clustering.make(column_path.super_column), Row.Deletion.regular(new DeletionTime(timestamp, nowInSec))); +- update = PartitionUpdate.singleRowUpdate(metadata, dk, row); +- } +- else +- { +- try +- { +- LegacyLayout.LegacyCellName name = LegacyLayout.decodeCellName(metadata, column_path.super_column, column_path.column); +- CellPath path = name.collectionElement == null ? null : CellPath.create(name.collectionElement); +- Cell cell = BufferCell.tombstone(name.column, timestamp, nowInSec, path); +- update = PartitionUpdate.singleRowUpdate(metadata, dk, BTreeRow.singleCellRow(name.clustering, cell)); +- } +- catch (UnknownColumnException e) +- { +- throw new org.apache.cassandra.exceptions.InvalidRequestException(e.getMessage()); +- } +- } +- +- org.apache.cassandra.db.Mutation mutation = new org.apache.cassandra.db.Mutation(update); +- +- if (isCommutativeOp) +- doInsert(consistency_level, Collections.singletonList(new CounterMutation(mutation, ThriftConversion.fromThrift(consistency_level)))); +- else +- doInsert(consistency_level, Collections.singletonList(mutation)); +- } +- +- public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level) +- throws InvalidRequestException, UnavailableException, TimedOutException +- { +- if (startSessionIfRequested()) +- { +- Map traceParameters = ImmutableMap.of("key", ByteBufferUtil.bytesToHex(key), +- "column_path", column_path.toString(), +- "timestamp", timestamp + "", +- "consistency_level", consistency_level.name()); +- Tracing.instance.begin("remove", traceParameters); +- } +- else +- { +- logger.trace("remove"); +- } +- +- try +- { +- internal_remove(key, column_path, timestamp, consistency_level, false); +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- finally +- { +- Tracing.instance.stopSession(); +- } +- } +- +- private void doInsert(ConsistencyLevel consistency_level, List mutations) +- throws 
UnavailableException, TimedOutException, org.apache.cassandra.exceptions.InvalidRequestException +- { +- doInsert(consistency_level, mutations, false); +- } +- +- private void doInsert(ConsistencyLevel consistency_level, List mutations, boolean mutateAtomically) +- throws UnavailableException, TimedOutException, org.apache.cassandra.exceptions.InvalidRequestException +- { +- org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(consistency_level); +- consistencyLevel.validateForWrite(state().getKeyspace()); +- if (mutations.isEmpty()) +- return; +- +- long timeout = Long.MAX_VALUE; +- for (IMutation m : mutations) +- timeout = Longs.min(timeout, m.getTimeout()); +- +- schedule(timeout); +- try +- { +- StorageProxy.mutateWithTriggers(mutations, consistencyLevel, mutateAtomically); +- } +- catch (RequestExecutionException e) +- { +- ThriftConversion.rethrow(e); +- } +- finally +- { +- release(); +- } +- } +- +- private void validateLogin() throws InvalidRequestException +- { +- try +- { +- state().validateLogin(); +- } +- catch (UnauthorizedException e) +- { +- throw new InvalidRequestException(e.getMessage()); +- } +- } +- +- public KsDef describe_keyspace(String keyspaceName) throws NotFoundException, InvalidRequestException +- { +- validateLogin(); +- +- KeyspaceMetadata ksm = Schema.instance.getKSMetaData(keyspaceName); +- if (ksm == null) +- throw new NotFoundException(); +- +- return ThriftConversion.toThrift(ksm); +- } +- +- public List get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level) +- throws InvalidRequestException, UnavailableException, TException, TimedOutException +- { +- if (startSessionIfRequested()) +- { +- Map traceParameters = ImmutableMap.of( +- "column_parent", column_parent.toString(), +- "predicate", predicate.toString(), +- "range", range.toString(), +- "consistency_level", consistency_level.name()); +- 
Tracing.instance.begin("get_range_slices", traceParameters); +- } +- else +- { +- logger.trace("range_slice"); +- } +- +- try +- { +- ThriftClientState cState = state(); +- String keyspace = cState.getKeyspace(); +- cState.hasColumnFamilyAccess(keyspace, column_parent.column_family, Permission.SELECT); +- +- CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family); +- ThriftValidation.validateColumnParent(metadata, column_parent); +- ThriftValidation.validatePredicate(metadata, column_parent, predicate); +- ThriftValidation.validateKeyRange(metadata, column_parent.super_column, range); +- +- org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(consistency_level); +- consistencyLevel.validateForRead(keyspace); +- +- IPartitioner p = metadata.partitioner; +- AbstractBounds bounds; +- if (range.start_key == null) +- { +- Token.TokenFactory tokenFactory = p.getTokenFactory(); +- Token left = tokenFactory.fromString(range.start_token); +- Token right = tokenFactory.fromString(range.end_token); +- bounds = Range.makeRowRange(left, right); +- } +- else +- { +- PartitionPosition end = range.end_key == null +- ? 
p.getTokenFactory().fromString(range.end_token).maxKeyBound() +- : PartitionPosition.ForKey.get(range.end_key, p); +- bounds = new Bounds<>(PartitionPosition.ForKey.get(range.start_key, p), end); +- } +- int nowInSec = FBUtilities.nowInSeconds(); +- schedule(DatabaseDescriptor.getRangeRpcTimeout()); +- try +- { +- ColumnFilter columns = makeColumnFilter(metadata, column_parent, predicate); +- ClusteringIndexFilter filter = toInternalFilter(metadata, column_parent, predicate); +- DataLimits limits = getLimits(range.count, metadata.isSuper() && !column_parent.isSetSuper_column(), predicate); +- PartitionRangeReadCommand cmd = new PartitionRangeReadCommand(false, +- 0, +- true, +- metadata, +- nowInSec, +- columns, +- ThriftConversion.rowFilterFromThrift(metadata, range.row_filter), +- limits, +- new DataRange(bounds, filter), +- Optional.empty()); +- try (PartitionIterator results = StorageProxy.getRangeSlice(cmd, consistencyLevel)) +- { +- assert results != null; +- return thriftifyKeySlices(results, column_parent, limits.perPartitionCount()); +- } +- } +- finally +- { +- release(); +- } +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- catch (RequestExecutionException e) +- { +- throw ThriftConversion.rethrow(e); +- } +- finally +- { +- Tracing.instance.stopSession(); +- } +- } +- +- public List get_paged_slice(String column_family, KeyRange range, ByteBuffer start_column, ConsistencyLevel consistency_level) +- throws InvalidRequestException, UnavailableException, TimedOutException, TException +- { +- if (startSessionIfRequested()) +- { +- Map traceParameters = ImmutableMap.of("column_family", column_family, +- "range", range.toString(), +- "start_column", ByteBufferUtil.bytesToHex(start_column), +- "consistency_level", consistency_level.name()); +- Tracing.instance.begin("get_paged_slice", traceParameters); +- } +- else +- { +- logger.trace("get_paged_slice"); +- } +- +- try +- { +- +- ThriftClientState cState = 
state(); +- String keyspace = cState.getKeyspace(); +- cState.hasColumnFamilyAccess(keyspace, column_family, Permission.SELECT); +- +- CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_family); +- ThriftValidation.validateKeyRange(metadata, null, range); +- +- org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(consistency_level); +- consistencyLevel.validateForRead(keyspace); +- +- IPartitioner p = metadata.partitioner; +- AbstractBounds bounds; +- if (range.start_key == null) +- { +- // (token, key) is unsupported, assume (token, token) +- Token.TokenFactory tokenFactory = p.getTokenFactory(); +- Token left = tokenFactory.fromString(range.start_token); +- Token right = tokenFactory.fromString(range.end_token); +- bounds = Range.makeRowRange(left, right); +- } +- else +- { +- PartitionPosition end = range.end_key == null +- ? p.getTokenFactory().fromString(range.end_token).maxKeyBound() +- : PartitionPosition.ForKey.get(range.end_key, p); +- bounds = new Bounds<>(PartitionPosition.ForKey.get(range.start_key, p), end); +- } +- +- if (range.row_filter != null && !range.row_filter.isEmpty()) +- throw new InvalidRequestException("Cross-row paging is not supported along with index clauses"); +- +- int nowInSec = FBUtilities.nowInSeconds(); +- schedule(DatabaseDescriptor.getRangeRpcTimeout()); +- try +- { +- ClusteringIndexFilter filter = new ClusteringIndexSliceFilter(Slices.ALL, false); +- DataLimits limits = getLimits(range.count, true, Integer.MAX_VALUE); +- Clustering pageFrom = metadata.isSuper() +- ? 
Clustering.make(start_column) +- : LegacyLayout.decodeCellName(metadata, start_column).clustering; +- PartitionRangeReadCommand cmd = new PartitionRangeReadCommand(false, +- 0, +- true, +- metadata, +- nowInSec, +- ColumnFilter.all(metadata), +- RowFilter.NONE, +- limits, +- new DataRange(bounds, filter).forPaging(bounds, metadata.comparator, pageFrom, true), +- Optional.empty()); +- try (PartitionIterator results = StorageProxy.getRangeSlice(cmd, consistencyLevel)) +- { +- return thriftifyKeySlices(results, new ColumnParent(column_family), limits.perPartitionCount()); +- } +- } +- catch (UnknownColumnException e) +- { +- throw new InvalidRequestException(e.getMessage()); +- } +- finally +- { +- release(); +- } +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- catch (RequestExecutionException e) +- { +- throw ThriftConversion.rethrow(e); +- } +- finally +- { +- Tracing.instance.stopSession(); +- } +- } +- +- private List thriftifyKeySlices(PartitionIterator results, ColumnParent column_parent, int cellLimit) +- { +- try (PartitionIterator iter = results) +- { +- List keySlices = new ArrayList<>(); +- while (iter.hasNext()) +- { +- try (RowIterator partition = iter.next()) +- { +- List thriftifiedColumns = thriftifyPartition(partition, column_parent.super_column != null, partition.isReverseOrder(), cellLimit); +- keySlices.add(new KeySlice(partition.partitionKey().getKey(), thriftifiedColumns)); +- } +- } +- +- return keySlices; +- } +- } +- +- public List get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) +- throws InvalidRequestException, UnavailableException, TimedOutException, TException +- { +- if (startSessionIfRequested()) +- { +- Map traceParameters = ImmutableMap.of("column_parent", column_parent.toString(), +- "index_clause", index_clause.toString(), +- "slice_predicate", column_predicate.toString(), +- 
"consistency_level", consistency_level.name()); +- Tracing.instance.begin("get_indexed_slices", traceParameters); +- } +- else +- { +- logger.trace("scan"); +- } +- +- try +- { +- ThriftClientState cState = state(); +- String keyspace = cState.getKeyspace(); +- cState.hasColumnFamilyAccess(keyspace, column_parent.column_family, Permission.SELECT); +- CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, false); +- ThriftValidation.validateColumnParent(metadata, column_parent); +- ThriftValidation.validatePredicate(metadata, column_parent, column_predicate); +- ThriftValidation.validateIndexClauses(metadata, index_clause); +- org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(consistency_level); +- consistencyLevel.validateForRead(keyspace); +- +- IPartitioner p = metadata.partitioner; +- AbstractBounds bounds = new Bounds<>(PartitionPosition.ForKey.get(index_clause.start_key, p), +- p.getMinimumToken().minKeyBound()); +- +- int nowInSec = FBUtilities.nowInSeconds(); +- ColumnFilter columns = makeColumnFilter(metadata, column_parent, column_predicate); +- ClusteringIndexFilter filter = toInternalFilter(metadata, column_parent, column_predicate); +- DataLimits limits = getLimits(index_clause.count, metadata.isSuper() && !column_parent.isSetSuper_column(), column_predicate); +- PartitionRangeReadCommand cmd = new PartitionRangeReadCommand(false, +- 0, +- true, +- metadata, +- nowInSec, +- columns, +- ThriftConversion.rowFilterFromThrift(metadata, index_clause.expressions), +- limits, +- new DataRange(bounds, filter), +- Optional.empty()); +- // If there's a secondary index that the command can use, have it validate +- // the request parameters. Note that as a side effect, if a viable Index is +- // identified by the CFS's index manager, it will be cached in the command +- // and serialized during distribution to replicas in order to avoid performing +- // further lookups. 
+- cmd.maybeValidateIndex(); +- +- try (PartitionIterator results = StorageProxy.getRangeSlice(cmd, consistencyLevel)) +- { +- return thriftifyKeySlices(results, column_parent, limits.perPartitionCount()); +- } +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- catch (RequestExecutionException e) +- { +- throw ThriftConversion.rethrow(e); +- } +- finally +- { +- Tracing.instance.stopSession(); +- } +- } +- +- public List describe_keyspaces() throws TException, InvalidRequestException +- { +- validateLogin(); +- +- Set keyspaces = Schema.instance.getKeyspaces(); +- List ksset = new ArrayList<>(keyspaces.size()); +- for (String ks : keyspaces) +- { +- try +- { +- ksset.add(describe_keyspace(ks)); +- } +- catch (NotFoundException nfe) +- { +- logger.info("Failed to find metadata for keyspace '{}'. Continuing... ", ks); +- } +- } +- return ksset; +- } +- +- public String describe_cluster_name() throws TException +- { +- return DatabaseDescriptor.getClusterName(); +- } +- +- public String describe_version() throws TException +- { +- return cassandraConstants.VERSION; +- } +- +- public List describe_ring(String keyspace) throws InvalidRequestException +- { +- try +- { +- return StorageService.instance.describeRing(keyspace); +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- } +- +- @Override +- public List describe_local_ring(String keyspace) throws InvalidRequestException, TException +- { +- try +- { +- return StorageService.instance.describeLocalRing(keyspace); +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- } +- +- public Map describe_token_map() throws InvalidRequestException +- { +- return StorageService.instance.getTokenToEndpointMap(); +- } +- +- public String describe_partitioner() throws TException +- { +- return StorageService.instance.getPartitionerName(); +- } +- +- public String describe_snitch() throws TException +- { 
+- if (DatabaseDescriptor.getEndpointSnitch() instanceof DynamicEndpointSnitch) +- return ((DynamicEndpointSnitch)DatabaseDescriptor.getEndpointSnitch()).subsnitch.getClass().getName(); +- return DatabaseDescriptor.getEndpointSnitch().getClass().getName(); +- } +- +- @Deprecated +- public List describe_splits(String cfName, String start_token, String end_token, int keys_per_split) +- throws TException, InvalidRequestException +- { +- List splits = describe_splits_ex(cfName, start_token, end_token, keys_per_split); +- List result = new ArrayList<>(splits.size() + 1); +- +- result.add(splits.get(0).getStart_token()); +- for (CfSplit cfSplit : splits) +- result.add(cfSplit.getEnd_token()); +- +- return result; +- } +- +- public List describe_splits_ex(String cfName, String start_token, String end_token, int keys_per_split) +- throws InvalidRequestException, TException +- { +- try +- { +- Token.TokenFactory tf = StorageService.instance.getTokenFactory(); +- Range tr = new Range(tf.fromString(start_token), tf.fromString(end_token)); +- List, Long>> splits = +- StorageService.instance.getSplits(state().getKeyspace(), cfName, tr, keys_per_split); +- List result = new ArrayList<>(splits.size()); +- for (Pair, Long> split : splits) +- result.add(new CfSplit(split.left.left.toString(), split.left.right.toString(), split.right)); +- return result; +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- } +- +- public void login(AuthenticationRequest auth_request) throws TException +- { +- try +- { +- state().login(DatabaseDescriptor.getAuthenticator().legacyAuthenticate(auth_request.getCredentials())); +- } +- catch (org.apache.cassandra.exceptions.AuthenticationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- } +- +- /** +- * Schedule the current thread for access to the required services +- */ +- private void schedule(long timeoutMS) throws UnavailableException +- { +- try +- { +- 
requestScheduler.queue(Thread.currentThread(), state().getSchedulingValue(), timeoutMS); +- } +- catch (TimeoutException e) +- { +- throw new UnavailableException(); +- } +- } +- +- /** +- * Release count for the used up resources +- */ +- private void release() +- { +- requestScheduler.release(); +- } +- +- public String system_add_column_family(CfDef cf_def) throws TException +- { +- logger.trace("add_column_family"); +- +- try +- { +- ClientState cState = state(); +- String keyspace = cState.getKeyspace(); +- cState.hasKeyspaceAccess(keyspace, Permission.CREATE); +- cf_def.unsetId(); // explicitly ignore any id set by client (Hector likes to set zero) +- CFMetaData cfm = ThriftConversion.fromThrift(cf_def); +- cfm.params.compaction.validate(); +- +- if (!cfm.getTriggers().isEmpty()) +- state().ensureIsSuper("Only superusers are allowed to add triggers."); +- +- MigrationManager.announceNewColumnFamily(cfm); +- return Schema.instance.getVersion().toString(); +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- } +- +- public String system_drop_column_family(String column_family) +- throws InvalidRequestException, SchemaDisagreementException, TException +- { +- logger.trace("drop_column_family"); +- +- ThriftClientState cState = state(); +- +- try +- { +- String keyspace = cState.getKeyspace(); +- cState.hasColumnFamilyAccess(keyspace, column_family, Permission.DROP); +- +- CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_family); +- if (metadata.isView()) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("Cannot drop Materialized Views from Thrift"); +- +- MigrationManager.announceColumnFamilyDrop(keyspace, column_family); +- return Schema.instance.getVersion().toString(); +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- } +- +- public String system_add_keyspace(KsDef ks_def) +- throws InvalidRequestException, 
SchemaDisagreementException, TException +- { +- logger.trace("add_keyspace"); +- +- try +- { +- ThriftValidation.validateKeyspaceNotSystem(ks_def.name); +- state().hasAllKeyspacesAccess(Permission.CREATE); +- ThriftValidation.validateKeyspaceNotYetExisting(ks_def.name); +- +- // generate a meaningful error if the user setup keyspace and/or column definition incorrectly +- for (CfDef cf : ks_def.cf_defs) +- { +- if (!cf.getKeyspace().equals(ks_def.getName())) +- { +- throw new InvalidRequestException("CfDef (" + cf.getName() +") had a keyspace definition that did not match KsDef"); +- } +- } +- +- Collection cfDefs = new ArrayList(ks_def.cf_defs.size()); +- for (CfDef cf_def : ks_def.cf_defs) +- { +- cf_def.unsetId(); // explicitly ignore any id set by client (same as system_add_column_family) +- CFMetaData cfm = ThriftConversion.fromThrift(cf_def); +- +- if (!cfm.getTriggers().isEmpty()) +- state().ensureIsSuper("Only superusers are allowed to add triggers."); +- +- cfDefs.add(cfm); +- } +- MigrationManager.announceNewKeyspace(ThriftConversion.fromThrift(ks_def, cfDefs.toArray(new CFMetaData[cfDefs.size()]))); +- return Schema.instance.getVersion().toString(); +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- } +- +- public String system_drop_keyspace(String keyspace) +- throws InvalidRequestException, SchemaDisagreementException, TException +- { +- logger.trace("drop_keyspace"); +- +- try +- { +- ThriftValidation.validateKeyspaceNotSystem(keyspace); +- state().hasKeyspaceAccess(keyspace, Permission.DROP); +- +- MigrationManager.announceKeyspaceDrop(keyspace); +- return Schema.instance.getVersion().toString(); +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- } +- +- /** update an existing keyspace, but do not allow column family modifications. 
+- * @throws SchemaDisagreementException +- */ +- public String system_update_keyspace(KsDef ks_def) +- throws InvalidRequestException, SchemaDisagreementException, TException +- { +- logger.trace("update_keyspace"); +- +- try +- { +- ThriftValidation.validateKeyspaceNotSystem(ks_def.name); +- state().hasKeyspaceAccess(ks_def.name, Permission.ALTER); +- ThriftValidation.validateKeyspace(ks_def.name); +- if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0) +- throw new InvalidRequestException("Keyspace update must not contain any table definitions."); +- +- MigrationManager.announceKeyspaceUpdate(ThriftConversion.fromThrift(ks_def)); +- return Schema.instance.getVersion().toString(); +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- } +- +- public String system_update_column_family(CfDef cf_def) +- throws InvalidRequestException, SchemaDisagreementException, TException +- { +- logger.trace("update_column_family"); +- +- try +- { +- if (cf_def.keyspace == null || cf_def.name == null) +- throw new InvalidRequestException("Keyspace and CF name must be set."); +- +- state().hasColumnFamilyAccess(cf_def.keyspace, cf_def.name, Permission.ALTER); +- CFMetaData oldCfm = Schema.instance.getCFMetaData(cf_def.keyspace, cf_def.name); +- +- if (oldCfm == null) +- throw new InvalidRequestException("Could not find table definition to modify."); +- +- if (oldCfm.isView()) +- throw new InvalidRequestException("Cannot modify Materialized View table " + oldCfm.cfName + " as it may break the schema. You should use cqlsh to modify Materialized View tables instead."); +- if (!Iterables.isEmpty(View.findAll(cf_def.keyspace, cf_def.name))) +- throw new InvalidRequestException("Cannot modify table with Materialized View " + oldCfm.cfName + " as it may break the schema. 
You should use cqlsh to modify tables with Materialized Views instead."); +- +- if (!oldCfm.isThriftCompatible()) +- throw new InvalidRequestException("Cannot modify CQL3 table " + oldCfm.cfName + " as it may break the schema. You should use cqlsh to modify CQL3 tables instead."); +- +- CFMetaData cfm = ThriftConversion.fromThriftForUpdate(cf_def, oldCfm); +- cfm.params.compaction.validate(); +- +- if (!oldCfm.getTriggers().equals(cfm.getTriggers())) +- state().ensureIsSuper("Only superusers are allowed to add or remove triggers."); +- +- MigrationManager.announceColumnFamilyUpdate(cfm); +- return Schema.instance.getVersion().toString(); +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- } +- +- public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TimedOutException, TException +- { +- ClientState cState = state(); +- +- try +- { +- String keyspace = cState.getKeyspace(); +- cState.hasColumnFamilyAccess(keyspace, cfname, Permission.MODIFY); +- CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, cfname, false); +- if (metadata.isView()) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("Cannot truncate Materialized Views"); +- +- if (startSessionIfRequested()) +- { +- Tracing.instance.begin("truncate", ImmutableMap.of("cf", cfname, "ks", keyspace)); +- } +- else +- { +- logger.trace("truncating {}.{}", cState.getKeyspace(), cfname); +- } +- +- schedule(DatabaseDescriptor.getTruncateRpcTimeout()); +- try +- { +- StorageProxy.truncateBlocking(cState.getKeyspace(), cfname); +- } +- finally +- { +- release(); +- } +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- catch (org.apache.cassandra.exceptions.UnavailableException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- catch (TimeoutException e) +- { +- throw new TimedOutException(); +- } +- finally +- { +- Tracing.instance.stopSession(); +- } +- } 
+- +- public void set_keyspace(String keyspace) throws InvalidRequestException, TException +- { +- try +- { +- state().setKeyspace(keyspace); +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- } +- +- public Map> describe_schema_versions() throws TException, InvalidRequestException +- { +- logger.trace("checking schema agreement"); +- return StorageProxy.describeSchemaVersions(); +- } +- +- // counter methods +- +- public void add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level) +- throws InvalidRequestException, UnavailableException, TimedOutException, TException +- { +- if (startSessionIfRequested()) +- { +- Map traceParameters = ImmutableMap.of("column_parent", column_parent.toString(), +- "column", column.toString(), +- "consistency_level", consistency_level.name()); +- Tracing.instance.begin("add", traceParameters); +- } +- else +- { +- logger.trace("add"); +- } +- +- try +- { +- ClientState cState = state(); +- String keyspace = cState.getKeyspace(); +- +- cState.hasColumnFamilyAccess(keyspace, column_parent.column_family, Permission.MODIFY); +- +- CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, true); +- if (metadata.isView()) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("Cannot modify Materialized Views directly"); +- +- ThriftValidation.validateKey(metadata, key); +- ThriftConversion.fromThrift(consistency_level).validateCounterForWrite(metadata); +- ThriftValidation.validateColumnParent(metadata, column_parent); +- // SuperColumn field is usually optional, but not when we're adding +- if (metadata.isSuper() && column_parent.super_column == null) +- throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family); +- +- ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name)); +- +- try +- { +- 
LegacyLayout.LegacyCellName name = LegacyLayout.decodeCellName(metadata, column_parent.super_column, column.name); +- +- // See UpdateParameters.addCounter() for more details on this +- ByteBuffer value = CounterContext.instance().createLocal(column.value); +- CellPath path = name.collectionElement == null ? null : CellPath.create(name.collectionElement); +- Cell cell = BufferCell.live(name.column, FBUtilities.timestampMicros(), value, path); +- +- PartitionUpdate update = PartitionUpdate.singleRowUpdate(metadata, key, BTreeRow.singleCellRow(name.clustering, cell)); +- +- org.apache.cassandra.db.Mutation mutation = new org.apache.cassandra.db.Mutation(update); +- doInsert(consistency_level, Arrays.asList(new CounterMutation(mutation, ThriftConversion.fromThrift(consistency_level)))); +- } +- catch (MarshalException|UnknownColumnException e) +- { +- throw new InvalidRequestException(e.getMessage()); +- } +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- finally +- { +- Tracing.instance.stopSession(); +- } +- } +- +- public void remove_counter(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level) +- throws InvalidRequestException, UnavailableException, TimedOutException, TException +- { +- if (startSessionIfRequested()) +- { +- Map traceParameters = ImmutableMap.of("key", ByteBufferUtil.bytesToHex(key), +- "column_path", path.toString(), +- "consistency_level", consistency_level.name()); +- Tracing.instance.begin("remove_counter", traceParameters); +- } +- else +- { +- logger.trace("remove_counter"); +- } +- +- try +- { +- internal_remove(key, path, FBUtilities.timestampMicros(), consistency_level, true); +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- finally +- { +- Tracing.instance.stopSession(); +- } +- } +- +- private static String uncompress(ByteBuffer query, Compression compression) throws InvalidRequestException +- { +- String queryString = null; +- 
+- // Decompress the query string. +- try +- { +- switch (compression) +- { +- case GZIP: +- DataOutputBuffer decompressed = new DataOutputBuffer(); +- byte[] outBuffer = new byte[1024], inBuffer = new byte[1024]; +- +- Inflater decompressor = new Inflater(); +- +- int lenRead = 0; +- while (true) +- { +- if (decompressor.needsInput()) +- lenRead = query.remaining() < 1024 ? query.remaining() : 1024; +- query.get(inBuffer, 0, lenRead); +- decompressor.setInput(inBuffer, 0, lenRead); +- +- int lenWrite = 0; +- while ((lenWrite = decompressor.inflate(outBuffer)) != 0) +- decompressed.write(outBuffer, 0, lenWrite); +- +- if (decompressor.finished()) +- break; +- } +- +- decompressor.end(); +- +- queryString = new String(decompressed.getData(), 0, decompressed.getLength(), StandardCharsets.UTF_8); +- break; +- case NONE: +- try +- { +- queryString = ByteBufferUtil.string(query); +- } +- catch (CharacterCodingException ex) +- { +- throw new InvalidRequestException(ex.getMessage()); +- } +- break; +- } +- } +- catch (DataFormatException e) +- { +- throw new InvalidRequestException("Error deflating query string."); +- } +- catch (IOException e) +- { +- throw new AssertionError(e); +- } +- return queryString; +- } +- +- public CqlResult execute_cql_query(ByteBuffer query, Compression compression) throws TException +- { +- throw new InvalidRequestException("CQL2 has been removed in Cassandra 3.0. 
Please use CQL3 instead"); +- } +- +- public CqlResult execute_cql3_query(ByteBuffer query, Compression compression, ConsistencyLevel cLevel) throws TException +- { +- try +- { +- String queryString = uncompress(query, compression); +- if (startSessionIfRequested()) +- { +- Tracing.instance.begin("execute_cql3_query", +- ImmutableMap.of("query", queryString, +- "consistency_level", cLevel.name())); +- } +- else +- { +- logger.trace("execute_cql3_query"); +- } +- +- ThriftClientState cState = state(); +- return ClientState.getCQLQueryHandler().process(queryString, +- cState.getQueryState(), +- QueryOptions.fromThrift(ThriftConversion.fromThrift(cLevel), +- Collections.emptyList()), +- null).toThriftResult(); +- } +- catch (RequestExecutionException e) +- { +- throw ThriftConversion.rethrow(e); +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- finally +- { +- Tracing.instance.stopSession(); +- } +- } +- +- public CqlPreparedResult prepare_cql_query(ByteBuffer query, Compression compression) throws TException +- { +- throw new InvalidRequestException("CQL2 has been removed in Cassandra 3.0. Please use CQL3 instead"); +- } +- +- public CqlPreparedResult prepare_cql3_query(ByteBuffer query, Compression compression) throws TException +- { +- logger.trace("prepare_cql3_query"); +- +- String queryString = uncompress(query, compression); +- ThriftClientState cState = state(); +- +- try +- { +- cState.validateLogin(); +- return ClientState.getCQLQueryHandler().prepare(queryString, cState.getQueryState(), null).toThriftPreparedResult(); +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- } +- +- public CqlResult execute_prepared_cql_query(int itemId, List bindVariables) throws TException +- { +- throw new InvalidRequestException("CQL2 has been removed in Cassandra 3.0. 
Please use CQL3 instead"); +- } +- +- public CqlResult execute_prepared_cql3_query(int itemId, List bindVariables, ConsistencyLevel cLevel) throws TException +- { +- if (startSessionIfRequested()) +- { +- // TODO we don't have [typed] access to CQL bind variables here. CASSANDRA-4560 is open to add support. +- Tracing.instance.begin("execute_prepared_cql3_query", ImmutableMap.of("consistency_level", cLevel.name())); +- } +- else +- { +- logger.trace("execute_prepared_cql3_query"); +- } +- +- try +- { +- ThriftClientState cState = state(); +- ParsedStatement.Prepared prepared = ClientState.getCQLQueryHandler().getPreparedForThrift(itemId); +- +- if (prepared == null) +- throw new InvalidRequestException(String.format("Prepared query with ID %d not found" + +- " (either the query was not prepared on this host (maybe the host has been restarted?)" + +- " or you have prepared too many queries and it has been evicted from the internal cache)", +- itemId)); +- logger.trace("Retrieved prepared statement #{} with {} bind markers", itemId, prepared.statement.getBoundTerms()); +- +- return ClientState.getCQLQueryHandler().processPrepared(prepared.statement, +- cState.getQueryState(), +- QueryOptions.fromThrift(ThriftConversion.fromThrift(cLevel), bindVariables), +- null).toThriftResult(); +- } +- catch (RequestExecutionException e) +- { +- throw ThriftConversion.rethrow(e); +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- finally +- { +- Tracing.instance.stopSession(); +- } +- } +- +- @Override +- public List get_multi_slice(MultiSliceRequest request) +- throws InvalidRequestException, UnavailableException, TimedOutException +- { +- if (startSessionIfRequested()) +- { +- Map traceParameters = ImmutableMap.of("key", ByteBufferUtil.bytesToHex(request.key), +- "column_parent", request.column_parent.toString(), +- "consistency_level", request.consistency_level.name(), +- "count", String.valueOf(request.count), +- "column_slices", 
request.column_slices.toString()); +- Tracing.instance.begin("get_multi_slice", traceParameters); +- } +- else +- { +- logger.trace("get_multi_slice"); +- } +- try +- { +- ClientState cState = state(); +- String keyspace = cState.getKeyspace(); +- state().hasColumnFamilyAccess(keyspace, request.getColumn_parent().column_family, Permission.SELECT); +- CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, request.getColumn_parent().column_family); +- if (metadata.isSuper()) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("get_multi_slice does not support super columns"); +- ThriftValidation.validateColumnParent(metadata, request.getColumn_parent()); +- org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(request.getConsistency_level()); +- consistencyLevel.validateForRead(keyspace); +- +- Slices.Builder builder = new Slices.Builder(metadata.comparator, request.getColumn_slices().size()); +- for (int i = 0 ; i < request.getColumn_slices().size() ; i++) +- { +- fixOptionalSliceParameters(request.getColumn_slices().get(i)); +- ClusteringBound start = LegacyLayout.decodeBound(metadata, request.getColumn_slices().get(i).start, true).bound; +- ClusteringBound finish = LegacyLayout.decodeBound(metadata, request.getColumn_slices().get(i).finish, false).bound; +- +- int compare = metadata.comparator.compare(start, finish); +- if (!request.reversed && compare > 0) +- throw new InvalidRequestException(String.format("Column slice at index %d had start greater than finish", i)); +- else if (request.reversed && compare < 0) +- throw new InvalidRequestException(String.format("Reversed column slice at index %d had start less than finish", i)); +- +- builder.add(request.reversed ? 
Slice.make(finish, start) : Slice.make(start, finish)); +- } +- +- Slices slices = builder.build(); +- ColumnFilter columns = makeColumnFilter(metadata, slices); +- ClusteringIndexSliceFilter filter = new ClusteringIndexSliceFilter(slices, request.reversed); +- DataLimits limits = getLimits(1, false, request.count); +- +- ThriftValidation.validateKey(metadata, request.key); +- DecoratedKey dk = metadata.decorateKey(request.key); +- SinglePartitionReadCommand cmd = SinglePartitionReadCommand.create(true, metadata, FBUtilities.nowInSeconds(), columns, RowFilter.NONE, limits, dk, filter); +- return getSlice(Collections.singletonList(cmd), +- false, +- limits.perPartitionCount(), +- consistencyLevel, +- cState).entrySet().iterator().next().getValue(); +- } +- catch (RequestValidationException e) +- { +- throw ThriftConversion.toThrift(e); +- } +- finally +- { +- Tracing.instance.stopSession(); +- } +- } +- +- /** +- * Set the to start-of end-of value of "" for start and finish. +- * @param columnSlice +- */ +- private static void fixOptionalSliceParameters(org.apache.cassandra.thrift.ColumnSlice columnSlice) { +- if (!columnSlice.isSetStart()) +- columnSlice.setStart(new byte[0]); +- if (!columnSlice.isSetFinish()) +- columnSlice.setFinish(new byte[0]); +- } +- +- /* +- * No-op since 3.0. 
+- */ +- public void set_cql_version(String version) +- { +- } +- +- public ByteBuffer trace_next_query() throws TException +- { +- UUID sessionId = UUIDGen.getTimeUUID(); +- state().getQueryState().prepareTracingSession(sessionId); +- return TimeUUIDType.instance.decompose(sessionId); +- } +- +- private boolean startSessionIfRequested() +- { +- if (state().getQueryState().traceNextQuery()) +- { +- state().getQueryState().createTracingSession(); +- return true; +- } +- return false; +- } +- +- private void registerMetrics() +- { +- ClientMetrics.instance.addCounter("connectedThriftClients", new Callable() +- { +- @Override +- public Integer call() throws Exception +- { +- return ThriftSessionManager.instance.getConnectedClients(); +- } +- }); +- } +- +- private static class ThriftCASRequest implements CASRequest +- { +- private final CFMetaData metadata; +- private final DecoratedKey key; +- private final List expected; +- private final PartitionUpdate updates; +- private final int nowInSec; +- +- private ThriftCASRequest(List expected, PartitionUpdate updates, int nowInSec) +- { +- this.metadata = updates.metadata(); +- this.key = updates.partitionKey(); +- this.expected = expected; +- this.updates = updates; +- this.nowInSec = nowInSec; +- } +- +- public SinglePartitionReadCommand readCommand(int nowInSec) +- { +- if (expected.isEmpty()) +- { +- // We want to know if the partition exists, so just fetch a single cell. +- ClusteringIndexSliceFilter filter = new ClusteringIndexSliceFilter(Slices.ALL, false); +- DataLimits limits = DataLimits.thriftLimits(1, 1); +- return new SinglePartitionReadCommand(false, 0, true, metadata, nowInSec, ColumnFilter.all(metadata), RowFilter.NONE, limits, key, filter); +- } +- +- // Gather the clustering for the expected values and query those. 
+- BTreeSet.Builder clusterings = BTreeSet.builder(metadata.comparator); +- FilteredPartition expectedPartition = +- FilteredPartition.create(LegacyLayout.toRowIterator(metadata, key, expected.iterator(), nowInSec)); +- +- for (Row row : expectedPartition) +- clusterings.add(row.clustering()); +- +- PartitionColumns columns = expectedPartition.staticRow().isEmpty() +- ? metadata.partitionColumns().withoutStatics() +- : metadata.partitionColumns(); +- ClusteringIndexNamesFilter filter = new ClusteringIndexNamesFilter(clusterings.build(), false); +- return SinglePartitionReadCommand.create(true, metadata, nowInSec, ColumnFilter.selection(columns), RowFilter.NONE, DataLimits.NONE, key, filter); +- } +- +- public boolean appliesTo(FilteredPartition current) +- { +- if (expected.isEmpty()) +- return current.isEmpty(); +- else if (current.isEmpty()) +- return false; +- +- // Push the expected results through ThriftResultsMerger to translate any static +- // columns into clusterings. The current partition is retrieved in the same so +- // unless they're both handled the same, they won't match. +- FilteredPartition expectedPartition = +- FilteredPartition.create( +- UnfilteredRowIterators.filter( +- ThriftResultsMerger.maybeWrap(expectedToUnfilteredRowIterator(), nowInSec), nowInSec)); +- +- // Check that for everything we expected, the fetched values exists and correspond. 
+- for (Row e : expectedPartition) +- { +- Row c = current.getRow(e.clustering()); +- if (c == null) +- return false; +- +- SearchIterator searchIter = c.searchIterator(); +- for (ColumnData expectedData : e) +- { +- ColumnDefinition column = expectedData.column(); +- ColumnData currentData = searchIter.next(column); +- if (currentData == null) +- return false; +- +- if (column.isSimple()) +- { +- if (!((Cell)currentData).value().equals(((Cell)expectedData).value())) +- return false; +- } +- else +- { +- ComplexColumnData currentComplexData = (ComplexColumnData)currentData; +- for (Cell expectedCell : (ComplexColumnData)expectedData) +- { +- Cell currentCell = currentComplexData.getCell(expectedCell.path()); +- if (currentCell == null || !currentCell.value().equals(expectedCell.value())) +- return false; +- } +- } +- } +- } +- return true; +- } +- +- public PartitionUpdate makeUpdates(FilteredPartition current) +- { +- return updates; +- } +- +- private UnfilteredRowIterator expectedToUnfilteredRowIterator() +- { +- return LegacyLayout.toUnfilteredRowIterator(metadata, key, LegacyLayout.LegacyDeletionInfo.live(), expected.iterator()); +- } +- } +-} +diff --git a/src/java/org/apache/cassandra/thrift/CustomTNonBlockingServer.java b/src/java/org/apache/cassandra/thrift/CustomTNonBlockingServer.java +deleted file mode 100644 +index de8df57..0000000 +--- a/src/java/org/apache/cassandra/thrift/CustomTNonBlockingServer.java ++++ /dev/null +@@ -1,91 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. 
You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. +- */ +-package org.apache.cassandra.thrift; +- +-import java.net.InetSocketAddress; +- +-import java.nio.channels.SelectionKey; +- +-import org.apache.cassandra.config.DatabaseDescriptor; +-import org.apache.thrift.server.TNonblockingServer; +-import org.apache.thrift.server.TServer; +-import org.apache.thrift.transport.TNonblockingServerTransport; +-import org.apache.thrift.transport.TNonblockingSocket; +-import org.apache.thrift.transport.TNonblockingTransport; +-import org.apache.thrift.transport.TTransportException; +- +-public class CustomTNonBlockingServer extends TNonblockingServer +-{ +- public CustomTNonBlockingServer(Args args) +- { +- super(args); +- } +- +- @Override +- @SuppressWarnings("resource") +- protected boolean requestInvoke(FrameBuffer frameBuffer) +- { +- TNonblockingSocket socket = (TNonblockingSocket)((CustomFrameBuffer)frameBuffer).getTransport(); +- ThriftSessionManager.instance.setCurrentSocket(socket.getSocketChannel().socket().getRemoteSocketAddress()); +- frameBuffer.invoke(); +- return true; +- } +- +- public static class Factory implements TServerFactory +- { +- @SuppressWarnings("resource") +- public TServer buildTServer(Args args) +- { +- if (DatabaseDescriptor.getClientEncryptionOptions().enabled) +- throw new RuntimeException("Client SSL is not supported for non-blocking sockets. 
Please remove client ssl from the configuration."); +- +- final InetSocketAddress addr = args.addr; +- TNonblockingServerTransport serverTransport; +- try +- { +- serverTransport = new TCustomNonblockingServerSocket(addr, args.keepAlive, args.sendBufferSize, args.recvBufferSize); +- } +- catch (TTransportException e) +- { +- throw new RuntimeException(String.format("Unable to create thrift socket to %s:%s", addr.getAddress(), addr.getPort()), e); +- } +- +- // This is single threaded hence the invocation will be all +- // in one thread. +- TNonblockingServer.Args serverArgs = new TNonblockingServer.Args(serverTransport).inputTransportFactory(args.inTransportFactory) +- .outputTransportFactory(args.outTransportFactory) +- .inputProtocolFactory(args.tProtocolFactory) +- .outputProtocolFactory(args.tProtocolFactory) +- .processor(args.processor); +- return new CustomTNonBlockingServer(serverArgs); +- } +- } +- +- public class CustomFrameBuffer extends FrameBuffer +- { +- public CustomFrameBuffer(final TNonblockingTransport trans, +- final SelectionKey selectionKey, +- final AbstractSelectThread selectThread) { +- super(trans, selectionKey, selectThread); +- } +- +- public TNonblockingTransport getTransport() { +- return this.trans_; +- } +- } +-} +diff --git a/src/java/org/apache/cassandra/thrift/CustomTThreadPoolServer.java b/src/java/org/apache/cassandra/thrift/CustomTThreadPoolServer.java +deleted file mode 100644 +index 46da9d5..0000000 +--- a/src/java/org/apache/cassandra/thrift/CustomTThreadPoolServer.java ++++ /dev/null +@@ -1,288 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. 
You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. +- */ +-package org.apache.cassandra.thrift; +- +-import java.net.InetSocketAddress; +-import java.net.SocketAddress; +-import java.net.SocketTimeoutException; +-import java.util.concurrent.ExecutorService; +-import java.util.concurrent.RejectedExecutionException; +-import java.util.concurrent.SynchronousQueue; +-import java.util.concurrent.ThreadPoolExecutor; +-import java.util.concurrent.TimeUnit; +-import java.util.concurrent.atomic.AtomicInteger; +- +-import javax.net.ssl.SSLServerSocket; +- +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-import org.apache.cassandra.concurrent.NamedThreadFactory; +-import org.apache.cassandra.config.DatabaseDescriptor; +-import org.apache.cassandra.config.EncryptionOptions.ClientEncryptionOptions; +-import org.apache.cassandra.utils.JVMStabilityInspector; +-import org.apache.cassandra.security.SSLFactory; +-import org.apache.thrift.TException; +-import org.apache.thrift.TProcessor; +-import org.apache.thrift.protocol.TProtocol; +-import org.apache.thrift.server.TServer; +-import org.apache.thrift.server.TThreadPoolServer; +-import org.apache.thrift.transport.TSSLTransportFactory; +-import org.apache.thrift.transport.TServerSocket; +-import org.apache.thrift.transport.TServerTransport; +-import org.apache.thrift.transport.TTransport; +-import org.apache.thrift.transport.TTransportException; +-import org.apache.thrift.transport.TSSLTransportFactory.TSSLTransportParameters; +- +-import com.google.common.util.concurrent.Uninterruptibles; +- +- +-/** +- * Slightly modified version of the Apache 
Thrift TThreadPoolServer. +- *

    +- * This allows passing an executor so you have more control over the actual +- * behavior of the tasks being run. +- *

    +- * Newer version of Thrift should make this obsolete. +- */ +-public class CustomTThreadPoolServer extends TServer +-{ +- +- private static final Logger logger = LoggerFactory.getLogger(CustomTThreadPoolServer.class.getName()); +- +- // Executor service for handling client connections +- private final ExecutorService executorService; +- +- // Flag for stopping the server +- private volatile boolean stopped; +- +- // Server options +- private final TThreadPoolServer.Args args; +- +- //Track and Limit the number of connected clients +- private final AtomicInteger activeClients = new AtomicInteger(0); +- +- +- public CustomTThreadPoolServer(TThreadPoolServer.Args args, ExecutorService executorService) { +- super(args); +- this.executorService = executorService; +- this.args = args; +- } +- +- @SuppressWarnings("resource") +- public void serve() +- { +- try +- { +- serverTransport_.listen(); +- } +- catch (TTransportException ttx) +- { +- logger.error("Error occurred during listening.", ttx); +- return; +- } +- +- stopped = false; +- while (!stopped) +- { +- // block until we are under max clients +- while (activeClients.get() >= args.maxWorkerThreads) +- { +- Uninterruptibles.sleepUninterruptibly(10, TimeUnit.MILLISECONDS); +- } +- +- try +- { +- TTransport client = serverTransport_.accept(); +- activeClients.incrementAndGet(); +- WorkerProcess wp = new WorkerProcess(client); +- executorService.execute(wp); +- } +- catch (TTransportException ttx) +- { +- if (ttx.getCause() instanceof SocketTimeoutException) // thrift sucks +- continue; +- +- if (!stopped) +- { +- logger.warn("Transport error occurred during acceptance of message.", ttx); +- } +- } +- catch (RejectedExecutionException e) +- { +- // worker thread decremented activeClients but hadn't finished exiting +- logger.trace("Dropping client connection because our limit of {} has been reached", args.maxWorkerThreads); +- continue; +- } +- +- if (activeClients.get() >= args.maxWorkerThreads) +- 
logger.warn("Maximum number of clients {} reached", args.maxWorkerThreads); +- } +- +- executorService.shutdown(); +- // Thrift's default shutdown waits for the WorkerProcess threads to complete. We do not, +- // because doing that allows a client to hold our shutdown "hostage" by simply not sending +- // another message after stop is called (since process will block indefinitely trying to read +- // the next meessage header). +- // +- // The "right" fix would be to update thrift to set a socket timeout on client connections +- // (and tolerate unintentional timeouts until stopped is set). But this requires deep +- // changes to the code generator, so simply setting these threads to daemon (in our custom +- // CleaningThreadPool) and ignoring them after shutdown is good enough. +- // +- // Remember, our goal on shutdown is not necessarily that each client request we receive +- // gets answered first [to do that, you should redirect clients to a different coordinator +- // first], but rather (1) to make sure that for each update we ack as successful, we generate +- // hints for any non-responsive replicas, and (2) to make sure that we quickly stop +- // accepting client connections so shutdown can continue. Not waiting for the WorkerProcess +- // threads here accomplishes (2); MessagingService's shutdown method takes care of (1). +- // +- // See CASSANDRA-3335 and CASSANDRA-3727. +- } +- +- public void stop() +- { +- stopped = true; +- serverTransport_.interrupt(); +- } +- +- private class WorkerProcess implements Runnable +- { +- +- /** +- * Client that this services. +- */ +- private TTransport client_; +- +- /** +- * Default constructor. 
+- * +- * @param client Transport to process +- */ +- private WorkerProcess(TTransport client) +- { +- client_ = client; +- } +- +- /** +- * Loops on processing a client forever +- */ +- public void run() +- { +- TProcessor processor = null; +- TProtocol inputProtocol = null; +- TProtocol outputProtocol = null; +- SocketAddress socket = null; +- try (TTransport inputTransport = inputTransportFactory_.getTransport(client_); +- TTransport outputTransport = outputTransportFactory_.getTransport(client_)) +- { +- socket = ((TCustomSocket) client_).getSocket().getRemoteSocketAddress(); +- ThriftSessionManager.instance.setCurrentSocket(socket); +- processor = processorFactory_.getProcessor(client_); +- +- inputProtocol = inputProtocolFactory_.getProtocol(inputTransport); +- outputProtocol = outputProtocolFactory_.getProtocol(outputTransport); +- // we check stopped first to make sure we're not supposed to be shutting +- // down. this is necessary for graceful shutdown. (but not sufficient, +- // since process() can take arbitrarily long waiting for client input. +- // See comments at the end of serve().) +- while (!stopped && processor.process(inputProtocol, outputProtocol)) +- { +- inputProtocol = inputProtocolFactory_.getProtocol(inputTransport); +- outputProtocol = outputProtocolFactory_.getProtocol(outputTransport); +- } +- } +- catch (TTransportException ttx) +- { +- // Assume the client died and continue silently +- // Log at debug to allow debugging of "frame too large" errors (see CASSANDRA-3142). 
+- logger.trace("Thrift transport error occurred during processing of message.", ttx); +- } +- catch (TException tx) +- { +- logger.error("Thrift error occurred during processing of message.", tx); +- } +- catch (Exception e) +- { +- JVMStabilityInspector.inspectThrowable(e); +- logger.error("Error occurred during processing of message.", e); +- } +- finally +- { +- if (socket != null) +- ThriftSessionManager.instance.connectionComplete(socket); +- +- activeClients.decrementAndGet(); +- } +- } +- } +- +- public static class Factory implements TServerFactory +- { +- @SuppressWarnings("resource") +- public TServer buildTServer(Args args) +- { +- final InetSocketAddress addr = args.addr; +- TServerTransport serverTransport; +- try +- { +- final ClientEncryptionOptions clientEnc = DatabaseDescriptor.getClientEncryptionOptions(); +- if (clientEnc.enabled) +- { +- logger.info("enabling encrypted thrift connections between client and server"); +- TSSLTransportParameters params = new TSSLTransportParameters(clientEnc.protocol, new String[0]); +- params.setKeyStore(clientEnc.keystore, clientEnc.keystore_password); +- if (clientEnc.require_client_auth) +- { +- params.setTrustStore(clientEnc.truststore, clientEnc.truststore_password); +- params.requireClientAuth(true); +- } +- TServerSocket sslServer = TSSLTransportFactory.getServerSocket(addr.getPort(), 0, addr.getAddress(), params); +- SSLServerSocket sslServerSocket = (SSLServerSocket) sslServer.getServerSocket(); +- String[] suites = SSLFactory.filterCipherSuites(sslServerSocket.getSupportedCipherSuites(), clientEnc.cipher_suites); +- sslServerSocket.setEnabledCipherSuites(suites); +- serverTransport = new TCustomServerSocket(sslServerSocket, args.keepAlive, args.sendBufferSize, args.recvBufferSize); +- } +- else +- { +- serverTransport = new TCustomServerSocket(addr, args.keepAlive, args.sendBufferSize, args.recvBufferSize, args.listenBacklog); +- } +- } +- catch (TTransportException e) +- { +- throw new 
RuntimeException(String.format("Unable to create thrift socket to %s:%s", addr.getAddress(), addr.getPort()), e); +- } +- // ThreadPool Server and will be invocation per connection basis... +- TThreadPoolServer.Args serverArgs = new TThreadPoolServer.Args(serverTransport) +- .minWorkerThreads(DatabaseDescriptor.getRpcMinThreads()) +- .maxWorkerThreads(DatabaseDescriptor.getRpcMaxThreads()) +- .inputTransportFactory(args.inTransportFactory) +- .outputTransportFactory(args.outTransportFactory) +- .inputProtocolFactory(args.tProtocolFactory) +- .outputProtocolFactory(args.tProtocolFactory) +- .processor(args.processor); +- ExecutorService executorService = new ThreadPoolExecutor(serverArgs.minWorkerThreads, +- serverArgs.maxWorkerThreads, +- 60, +- TimeUnit.SECONDS, +- new SynchronousQueue(), +- new NamedThreadFactory("Thrift")); +- return new CustomTThreadPoolServer(serverArgs, executorService); +- } +- } +-} +diff --git a/src/java/org/apache/cassandra/thrift/ITransportFactory.java b/src/java/org/apache/cassandra/thrift/ITransportFactory.java +deleted file mode 100644 +index 7a65728..0000000 +--- a/src/java/org/apache/cassandra/thrift/ITransportFactory.java ++++ /dev/null +@@ -1,64 +0,0 @@ +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. 
See the License for the +- * specific language governing permissions and limitations +- * under the License. +- * +- */ +-package org.apache.cassandra.thrift; +- +-import java.util.Map; +-import java.util.Set; +- +-import org.apache.thrift.transport.TTransport; +- +-/** +- * Transport factory for establishing thrift connections from clients to a remote server. +- */ +-public interface ITransportFactory +-{ +- static final String PROPERTY_KEY = "cassandra.client.transport.factory"; +- +- /** +- * Opens a client transport to a thrift server. +- * Example: +- * +- *
    +-     * TTransport transport = clientTransportFactory.openTransport(address, port);
    +-     * Cassandra.Iface client = new Cassandra.Client(new BinaryProtocol(transport));
    +-     * 
    +- * +- * @param host fully qualified hostname of the server +- * @param port RPC port of the server +- * @return open and ready to use transport +- * @throws Exception implementation defined; usually throws TTransportException or IOException +- * if the connection cannot be established +- */ +- TTransport openTransport(String host, int port) throws Exception; +- +- /** +- * Sets an implementation defined set of options. +- * Keys in this map must conform to the set set returned by ITransportFactory#supportedOptions. +- * @param options option map +- */ +- void setOptions(Map options); +- +- /** +- * @return set of options supported by this transport factory implementation +- */ +- Set supportedOptions(); +-} +- +diff --git a/src/java/org/apache/cassandra/thrift/SSLTransportFactory.java b/src/java/org/apache/cassandra/thrift/SSLTransportFactory.java +deleted file mode 100644 +index ea74b94..0000000 +--- a/src/java/org/apache/cassandra/thrift/SSLTransportFactory.java ++++ /dev/null +@@ -1,88 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. 
+- */ +-package org.apache.cassandra.thrift; +- +-import com.google.common.collect.Sets; +-import org.apache.thrift.transport.TFramedTransport; +-import org.apache.thrift.transport.TSSLTransportFactory; +-import org.apache.thrift.transport.TTransport; +- +-import java.util.Map; +-import java.util.Set; +- +-public class SSLTransportFactory implements ITransportFactory +-{ +- public static final int DEFAULT_MAX_FRAME_SIZE = 15 * 1024 * 1024; // 15 MiB +- +- public static final String TRUSTSTORE = "enc.truststore"; +- public static final String TRUSTSTORE_PASSWORD = "enc.truststore.password"; +- public static final String KEYSTORE = "enc.keystore"; +- public static final String KEYSTORE_PASSWORD = "enc.keystore.password"; +- public static final String PROTOCOL = "enc.protocol"; +- public static final String CIPHER_SUITES = "enc.cipher.suites"; +- public static final int SOCKET_TIMEOUT = 0; +- +- private static final Set SUPPORTED_OPTIONS = Sets.newHashSet(TRUSTSTORE, +- TRUSTSTORE_PASSWORD, +- KEYSTORE, +- KEYSTORE_PASSWORD, +- PROTOCOL, +- CIPHER_SUITES); +- +- private String truststore; +- private String truststorePassword; +- private String keystore; +- private String keystorePassword; +- private String protocol; +- private String[] cipherSuites; +- +- @Override +- @SuppressWarnings("resource") +- public TTransport openTransport(String host, int port) throws Exception +- { +- TSSLTransportFactory.TSSLTransportParameters params = new TSSLTransportFactory.TSSLTransportParameters(protocol, cipherSuites); +- params.setTrustStore(truststore, truststorePassword); +- if (null != keystore) +- params.setKeyStore(keystore, keystorePassword); +- TTransport trans = TSSLTransportFactory.getClientSocket(host, port, SOCKET_TIMEOUT, params); +- return new TFramedTransport(trans, DEFAULT_MAX_FRAME_SIZE); +- } +- +- @Override +- public void setOptions(Map options) +- { +- if (options.containsKey(TRUSTSTORE)) +- truststore = options.get(TRUSTSTORE); +- if 
(options.containsKey(TRUSTSTORE_PASSWORD)) +- truststorePassword = options.get(TRUSTSTORE_PASSWORD); +- if (options.containsKey(KEYSTORE)) +- keystore = options.get(KEYSTORE); +- if (options.containsKey(KEYSTORE_PASSWORD)) +- keystorePassword = options.get(KEYSTORE_PASSWORD); +- if (options.containsKey(PROTOCOL)) +- protocol = options.get(PROTOCOL); +- if (options.containsKey(CIPHER_SUITES)) +- cipherSuites = options.get(CIPHER_SUITES).split(","); +- } +- +- @Override +- public Set supportedOptions() +- { +- return SUPPORTED_OPTIONS; +- } +-} +diff --git a/src/java/org/apache/cassandra/thrift/TCustomNonblockingServerSocket.java b/src/java/org/apache/cassandra/thrift/TCustomNonblockingServerSocket.java +deleted file mode 100644 +index a430721..0000000 +--- a/src/java/org/apache/cassandra/thrift/TCustomNonblockingServerSocket.java ++++ /dev/null +@@ -1,87 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. 
+- */ +-package org.apache.cassandra.thrift; +- +-import java.net.InetSocketAddress; +-import java.net.Socket; +-import java.net.SocketException; +- +-import org.apache.thrift.transport.TNonblockingServerSocket; +-import org.apache.thrift.transport.TNonblockingSocket; +-import org.apache.thrift.transport.TTransportException; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-public class TCustomNonblockingServerSocket extends TNonblockingServerSocket +-{ +- private static final Logger logger = LoggerFactory.getLogger(TCustomNonblockingServerSocket.class); +- private final boolean keepAlive; +- private final Integer sendBufferSize; +- private final Integer recvBufferSize; +- +- public TCustomNonblockingServerSocket(InetSocketAddress bindAddr, boolean keepAlive, Integer sendBufferSize, Integer recvBufferSize) throws TTransportException +- { +- super(bindAddr); +- this.keepAlive = keepAlive; +- this.sendBufferSize = sendBufferSize; +- this.recvBufferSize = recvBufferSize; +- } +- +- @Override +- @SuppressWarnings("resource") +- protected TNonblockingSocket acceptImpl() throws TTransportException +- { +- TNonblockingSocket tsocket = super.acceptImpl(); +- if (tsocket == null || tsocket.getSocketChannel() == null) +- return tsocket; +- Socket socket = tsocket.getSocketChannel().socket(); +- try +- { +- socket.setKeepAlive(this.keepAlive); +- } +- catch (SocketException se) +- { +- logger.warn("Failed to set keep-alive on Thrift socket.", se); +- } +- +- if (this.sendBufferSize != null) +- { +- try +- { +- socket.setSendBufferSize(this.sendBufferSize.intValue()); +- } +- catch (SocketException se) +- { +- logger.warn("Failed to set send buffer size on Thrift socket.", se); +- } +- } +- +- if (this.recvBufferSize != null) +- { +- try +- { +- socket.setReceiveBufferSize(this.recvBufferSize.intValue()); +- } +- catch (SocketException se) +- { +- logger.warn("Failed to set receive buffer size on Thrift socket.", se); +- } +- } +- return tsocket; +- } +-} +diff 
--git a/src/java/org/apache/cassandra/thrift/TCustomServerSocket.java b/src/java/org/apache/cassandra/thrift/TCustomServerSocket.java +deleted file mode 100644 +index 8e27481..0000000 +--- a/src/java/org/apache/cassandra/thrift/TCustomServerSocket.java ++++ /dev/null +@@ -1,189 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. +- */ +-package org.apache.cassandra.thrift; +- +- +-import java.io.IOException; +-import java.net.InetSocketAddress; +-import java.net.ServerSocket; +-import java.net.Socket; +-import java.net.SocketException; +- +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-import org.apache.thrift.transport.TServerTransport; +-import org.apache.thrift.transport.TTransportException; +- +-/** +- * Extends Thrift's TServerSocket to allow customization of various desirable TCP properties. 
+- */ +-public class TCustomServerSocket extends TServerTransport +-{ +- +- private static final Logger logger = LoggerFactory.getLogger(TCustomServerSocket.class); +- +- /** +- * Underlying serversocket object +- */ +- private ServerSocket serverSocket = null; +- +- private final boolean keepAlive; +- private final Integer sendBufferSize; +- private final Integer recvBufferSize; +- +- /** +- * Allows fine-tuning of the server socket including keep-alive, reuse of addresses, send and receive buffer sizes. +- * +- * @param bindAddr +- * @param keepAlive +- * @param sendBufferSize +- * @param recvBufferSize +- * @throws TTransportException +- */ +- public TCustomServerSocket(InetSocketAddress bindAddr, boolean keepAlive, Integer sendBufferSize, +- Integer recvBufferSize, Integer listenBacklog) +- throws TTransportException +- { +- try +- { +- // Make server socket +- serverSocket = new ServerSocket(); +- // Prevent 2MSL delay problem on server restarts +- serverSocket.setReuseAddress(true); +- // Bind to listening port +- serverSocket.bind(bindAddr, listenBacklog); +- } +- catch (IOException ioe) +- { +- serverSocket = null; +- throw new TTransportException("Could not create ServerSocket on address " + bindAddr + "."); +- } +- +- this.keepAlive = keepAlive; +- this.sendBufferSize = sendBufferSize; +- this.recvBufferSize = recvBufferSize; +- } +- +- public TCustomServerSocket(ServerSocket socket, boolean keepAlive, Integer sendBufferSize, Integer recvBufferSize) +- { +- this.serverSocket = socket; +- this.keepAlive = keepAlive; +- this.sendBufferSize = sendBufferSize; +- this.recvBufferSize = recvBufferSize; +- } +- +- @Override +- @SuppressWarnings("resource") +- protected TCustomSocket acceptImpl() throws TTransportException +- { +- +- if (serverSocket == null) +- throw new TTransportException(TTransportException.NOT_OPEN, "No underlying server socket."); +- +- TCustomSocket tsocket = null; +- Socket socket = null; +- try +- { +- socket = serverSocket.accept(); +- 
tsocket = new TCustomSocket(socket); +- tsocket.setTimeout(0); +- } +- catch (IOException iox) +- { +- throw new TTransportException(iox); +- } +- +- try +- { +- socket.setKeepAlive(this.keepAlive); +- } +- catch (SocketException se) +- { +- logger.warn("Failed to set keep-alive on Thrift socket.", se); +- } +- +- if (this.sendBufferSize != null) +- { +- try +- { +- socket.setSendBufferSize(this.sendBufferSize.intValue()); +- } +- catch (SocketException se) +- { +- logger.warn("Failed to set send buffer size on Thrift socket.", se); +- } +- } +- +- if (this.recvBufferSize != null) +- { +- try +- { +- socket.setReceiveBufferSize(this.recvBufferSize.intValue()); +- } +- catch (SocketException se) +- { +- logger.warn("Failed to set receive buffer size on Thrift socket.", se); +- } +- } +- +- return tsocket; +- } +- +- @Override +- public void listen() throws TTransportException +- { +- // Make sure not to block on accept +- if (serverSocket != null) +- { +- try +- { +- serverSocket.setSoTimeout(100); +- } +- catch (SocketException sx) +- { +- logger.error("Could not set socket timeout.", sx); +- } +- } +- } +- +- @Override +- public void close() +- { +- if (serverSocket != null) +- { +- try +- { +- serverSocket.close(); +- } +- catch (IOException iox) +- { +- logger.warn("Could not close server socket.", iox); +- } +- serverSocket = null; +- } +- } +- +- @Override +- public void interrupt() +- { +- // The thread-safeness of this is dubious, but Java documentation suggests +- // that it is safe to do this from a different thread context +- close(); +- } +-} +diff --git a/src/java/org/apache/cassandra/thrift/TCustomSocket.java b/src/java/org/apache/cassandra/thrift/TCustomSocket.java +deleted file mode 100644 +index 1a5d538..0000000 +--- a/src/java/org/apache/cassandra/thrift/TCustomSocket.java ++++ /dev/null +@@ -1,210 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. 
See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. +- */ +-package org.apache.cassandra.thrift; +- +- +-import java.io.BufferedInputStream; +-import java.io.BufferedOutputStream; +-import java.io.IOException; +-import java.net.InetSocketAddress; +-import java.net.Socket; +-import java.net.SocketException; +- +-import org.apache.thrift.transport.TIOStreamTransport; +-import org.apache.thrift.transport.TTransportException; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * Socket implementation of the TTransport interface. +- * +- * Adds socket buffering +- * +- */ +-public class TCustomSocket extends TIOStreamTransport { +- +- private static final Logger LOGGER = LoggerFactory.getLogger(TCustomSocket.class.getName()); +- +- /** +- * Wrapped Socket object +- */ +- private Socket socket = null; +- +- /** +- * Remote host +- */ +- private String host = null; +- +- /** +- * Remote port +- */ +- private int port = 0; +- +- /** +- * Socket timeout +- */ +- private int timeout = 0; +- +- /** +- * Constructor that takes an already created socket. 
+- * +- * @param socket Already created socket object +- * @throws TTransportException if there is an error setting up the streams +- */ +- public TCustomSocket(Socket socket) throws TTransportException { +- this.socket = socket; +- try { +- socket.setSoLinger(false, 0); +- socket.setTcpNoDelay(true); +- } catch (SocketException sx) { +- LOGGER.warn("Could not configure socket.", sx); +- } +- +- if (isOpen()) { +- try { +- inputStream_ = new BufferedInputStream(socket.getInputStream(), 1024); +- outputStream_ = new BufferedOutputStream(socket.getOutputStream(), 1024); +- } catch (IOException iox) { +- close(); +- throw new TTransportException(TTransportException.NOT_OPEN, iox); +- } +- } +- } +- +- /** +- * Creates a new unconnected socket that will connect to the given host +- * on the given port. +- * +- * @param host Remote host +- * @param port Remote port +- */ +- public TCustomSocket(String host, int port) { +- this(host, port, 0); +- } +- +- /** +- * Creates a new unconnected socket that will connect to the given host +- * on the given port. +- * +- * @param host Remote host +- * @param port Remote port +- * @param timeout Socket timeout +- */ +- public TCustomSocket(String host, int port, int timeout) { +- this.host = host; +- this.port = port; +- this.timeout = timeout; +- initSocket(); +- } +- +- /** +- * Initializes the socket object +- */ +- private void initSocket() { +- socket = new Socket(); +- try { +- socket.setSoLinger(false, 0); +- socket.setTcpNoDelay(true); +- socket.setSoTimeout(timeout); +- } catch (SocketException sx) { +- LOGGER.error("Could not configure socket.", sx); +- } +- } +- +- /** +- * Sets the socket timeout +- * +- * @param timeout Milliseconds timeout +- */ +- public void setTimeout(int timeout) { +- this.timeout = timeout; +- try { +- socket.setSoTimeout(timeout); +- } catch (SocketException sx) { +- LOGGER.warn("Could not set socket timeout.", sx); +- } +- } +- +- /** +- * Returns a reference to the underlying socket. 
+- */ +- public Socket getSocket() { +- if (socket == null) { +- initSocket(); +- } +- return socket; +- } +- +- /** +- * Checks whether the socket is connected. +- */ +- public boolean isOpen() { +- if (socket == null) { +- return false; +- } +- return socket.isConnected(); +- } +- +- /** +- * Connects the socket, creating a new socket object if necessary. +- */ +- public void open() throws TTransportException { +- if (isOpen()) { +- throw new TTransportException(TTransportException.ALREADY_OPEN, "Socket already connected."); +- } +- +- if (host.length() == 0) { +- throw new TTransportException(TTransportException.NOT_OPEN, "Cannot open null host."); +- } +- if (port <= 0) { +- throw new TTransportException(TTransportException.NOT_OPEN, "Cannot open without port."); +- } +- +- if (socket == null) { +- initSocket(); +- } +- +- try { +- socket.connect(new InetSocketAddress(host, port), timeout); +- inputStream_ = new BufferedInputStream(socket.getInputStream(), 1024); +- outputStream_ = new BufferedOutputStream(socket.getOutputStream(), 1024); +- } catch (IOException iox) { +- close(); +- throw new TTransportException(TTransportException.NOT_OPEN, iox); +- } +- } +- +- /** +- * Closes the socket. +- */ +- public void close() { +- // Close the underlying streams +- super.close(); +- +- // Close the socket +- if (socket != null) { +- try { +- socket.close(); +- } catch (IOException iox) { +- LOGGER.warn("Could not close socket.", iox); +- } +- socket = null; +- } +- } +- +-} +diff --git a/src/java/org/apache/cassandra/thrift/TFramedTransportFactory.java b/src/java/org/apache/cassandra/thrift/TFramedTransportFactory.java +deleted file mode 100644 +index 7bf0b96..0000000 +--- a/src/java/org/apache/cassandra/thrift/TFramedTransportFactory.java ++++ /dev/null +@@ -1,56 +0,0 @@ +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. 
See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. +- * +- */ +-package org.apache.cassandra.thrift; +- +-import java.util.Collections; +-import java.util.Map; +-import java.util.Set; +- +-import org.apache.thrift.transport.TFramedTransport; +-import org.apache.thrift.transport.TSocket; +-import org.apache.thrift.transport.TTransport; +-import org.apache.thrift.transport.TTransportException; +- +-public class TFramedTransportFactory implements ITransportFactory +-{ +- private static final String THRIFT_FRAMED_TRANSPORT_SIZE_IN_MB = "cassandra.thrift.framed.size_mb"; +- private int thriftFramedTransportSizeMb = 15; // 15Mb is the default for C* & Hadoop ConfigHelper +- +- @SuppressWarnings("resource") +- public TTransport openTransport(String host, int port) throws TTransportException +- { +- TSocket socket = new TSocket(host, port); +- TTransport transport = new TFramedTransport(socket, thriftFramedTransportSizeMb * 1024 * 1024); +- transport.open(); +- return transport; +- } +- +- public void setOptions(Map options) +- { +- if (options.containsKey(THRIFT_FRAMED_TRANSPORT_SIZE_IN_MB)) +- thriftFramedTransportSizeMb = Integer.parseInt(options.get(THRIFT_FRAMED_TRANSPORT_SIZE_IN_MB)); +- } +- +- public Set supportedOptions() +- { +- return Collections.singleton(THRIFT_FRAMED_TRANSPORT_SIZE_IN_MB); +- } +-} +diff 
--git a/src/java/org/apache/cassandra/thrift/THsHaDisruptorServer.java b/src/java/org/apache/cassandra/thrift/THsHaDisruptorServer.java +deleted file mode 100644 +index 37bc440..0000000 +--- a/src/java/org/apache/cassandra/thrift/THsHaDisruptorServer.java ++++ /dev/null +@@ -1,109 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. 
+- */ +-package org.apache.cassandra.thrift; +- +-import java.net.InetSocketAddress; +-import java.util.concurrent.SynchronousQueue; +-import java.util.concurrent.ThreadPoolExecutor; +-import java.util.concurrent.TimeUnit; +- +-import com.thinkaurelius.thrift.Message; +-import com.thinkaurelius.thrift.TDisruptorServer; +-import org.apache.cassandra.concurrent.JMXEnabledThreadPoolExecutor; +-import org.apache.cassandra.concurrent.NamedThreadFactory; +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-import org.apache.cassandra.config.DatabaseDescriptor; +-import org.apache.thrift.server.TServer; +-import org.apache.thrift.transport.TNonblockingServerTransport; +-import org.apache.thrift.transport.TNonblockingSocket; +-import org.apache.thrift.transport.TTransportException; +- +-public class THsHaDisruptorServer extends TDisruptorServer +-{ +- private static final Logger logger = LoggerFactory.getLogger(THsHaDisruptorServer.class.getName()); +- +- /** +- * All the arguments to Non Blocking Server will apply here. In addition, +- * executor pool will be responsible for creating the internal threads which +- * will process the data. 
threads for selection usually are equal to the +- * number of cpu's +- */ +- public THsHaDisruptorServer(Args args) +- { +- super(args); +- logger.info("Starting up {}", this); +- } +- +- @Override +- protected void beforeInvoke(Message buffer) +- { +- TNonblockingSocket socket = (TNonblockingSocket) buffer.transport; +- ThriftSessionManager.instance.setCurrentSocket(socket.getSocketChannel().socket().getRemoteSocketAddress()); +- } +- +- public void beforeClose(Message buffer) +- { +- TNonblockingSocket socket = (TNonblockingSocket) buffer.transport; +- ThriftSessionManager.instance.connectionComplete(socket.getSocketChannel().socket().getRemoteSocketAddress()); +- } +- +- public static class Factory implements TServerFactory +- { +- @SuppressWarnings("resource") +- public TServer buildTServer(Args args) +- { +- if (DatabaseDescriptor.getClientEncryptionOptions().enabled) +- throw new RuntimeException("Client SSL is not supported for non-blocking sockets (hsha). Please remove client ssl from the configuration."); +- +- final InetSocketAddress addr = args.addr; +- TNonblockingServerTransport serverTransport; +- try +- { +- serverTransport = new TCustomNonblockingServerSocket(addr, args.keepAlive, args.sendBufferSize, args.recvBufferSize); +- } +- catch (TTransportException e) +- { +- throw new RuntimeException(String.format("Unable to create thrift socket to %s:%s", addr.getAddress(), addr.getPort()), e); +- } +- +- ThreadPoolExecutor invoker = new JMXEnabledThreadPoolExecutor(DatabaseDescriptor.getRpcMinThreads(), +- DatabaseDescriptor.getRpcMaxThreads(), +- 60L, +- TimeUnit.SECONDS, +- new SynchronousQueue(), +- new NamedThreadFactory("RPC-Thread"), "RPC-THREAD-POOL"); +- +- com.thinkaurelius.thrift.util.TBinaryProtocol.Factory protocolFactory = new com.thinkaurelius.thrift.util.TBinaryProtocol.Factory(true, true); +- +- TDisruptorServer.Args serverArgs = new TDisruptorServer.Args(serverTransport).useHeapBasedAllocation(true) +- 
.inputTransportFactory(args.inTransportFactory) +- .outputTransportFactory(args.outTransportFactory) +- .inputProtocolFactory(protocolFactory) +- .outputProtocolFactory(protocolFactory) +- .processor(args.processor) +- .maxFrameSizeInBytes(DatabaseDescriptor.getThriftFramedTransportSize()) +- .invocationExecutor(invoker) +- .alwaysReallocateBuffers(true); +- +- return new THsHaDisruptorServer(serverArgs); +- } +- } +-} +diff --git a/src/java/org/apache/cassandra/thrift/TServerCustomFactory.java b/src/java/org/apache/cassandra/thrift/TServerCustomFactory.java +deleted file mode 100644 +index 3c5b967..0000000 +--- a/src/java/org/apache/cassandra/thrift/TServerCustomFactory.java ++++ /dev/null +@@ -1,74 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. +- */ +-package org.apache.cassandra.thrift; +- +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-import org.apache.thrift.server.TServer; +- +-/** +- * Helper implementation to create a thrift TServer based on one of the common types we support (sync, hsha), +- * or a custom type by setting the fully qualified java class name in the rpc_server_type setting. 
+- */ +-public class TServerCustomFactory implements TServerFactory +-{ +- private static Logger logger = LoggerFactory.getLogger(TServerCustomFactory.class); +- private final String serverType; +- +- public TServerCustomFactory(String serverType) +- { +- assert serverType != null; +- this.serverType = serverType; +- } +- +- public TServer buildTServer(TServerFactory.Args args) +- { +- TServer server; +- if (ThriftServer.SYNC.equalsIgnoreCase(serverType)) +- { +- server = new CustomTThreadPoolServer.Factory().buildTServer(args); +- } +- else if(ThriftServer.ASYNC.equalsIgnoreCase(serverType)) +- { +- server = new CustomTNonBlockingServer.Factory().buildTServer(args); +- logger.info(String.format("Using non-blocking/asynchronous thrift server on %s : %s", args.addr.getHostName(), args.addr.getPort())); +- } +- else if(ThriftServer.HSHA.equalsIgnoreCase(serverType)) +- { +- server = new THsHaDisruptorServer.Factory().buildTServer(args); +- logger.info(String.format("Using custom half-sync/half-async thrift server on %s : %s", args.addr.getHostName(), args.addr.getPort())); +- } +- else +- { +- TServerFactory serverFactory; +- try +- { +- serverFactory = (TServerFactory) Class.forName(serverType).newInstance(); +- } +- catch (Exception e) +- { +- throw new RuntimeException("Failed to instantiate server factory:" + serverType, e); +- } +- server = serverFactory.buildTServer(args); +- logger.info(String.format("Using custom thrift server %s on %s : %s", server.getClass().getName(), args.addr.getHostName(), args.addr.getPort())); +- } +- return server; +- } +-} +diff --git a/src/java/org/apache/cassandra/thrift/TServerFactory.java b/src/java/org/apache/cassandra/thrift/TServerFactory.java +deleted file mode 100644 +index 09014ce..0000000 +--- a/src/java/org/apache/cassandra/thrift/TServerFactory.java ++++ /dev/null +@@ -1,44 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. 
See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. +- */ +-package org.apache.cassandra.thrift; +- +-import java.net.InetSocketAddress; +- +-import org.apache.thrift.TProcessor; +-import org.apache.thrift.protocol.TProtocolFactory; +-import org.apache.thrift.server.TServer; +-import org.apache.thrift.transport.TTransportFactory; +- +-public interface TServerFactory +-{ +- TServer buildTServer(Args args); +- +- public static class Args +- { +- public InetSocketAddress addr; +- public Integer listenBacklog; +- public TProcessor processor; +- public TProtocolFactory tProtocolFactory; +- public TTransportFactory inTransportFactory; +- public TTransportFactory outTransportFactory; +- public Integer sendBufferSize; +- public Integer recvBufferSize; +- public boolean keepAlive; +- } +-} +diff --git a/src/java/org/apache/cassandra/thrift/ThriftClientState.java b/src/java/org/apache/cassandra/thrift/ThriftClientState.java +deleted file mode 100644 +index 6a3c50f..0000000 +--- a/src/java/org/apache/cassandra/thrift/ThriftClientState.java ++++ /dev/null +@@ -1,56 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. 
The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. +- */ +-package org.apache.cassandra.thrift; +- +-import java.net.InetSocketAddress; +- +-import org.apache.cassandra.config.DatabaseDescriptor; +-import org.apache.cassandra.service.ClientState; +-import org.apache.cassandra.service.QueryState; +- +-/** +- * ClientState used by thrift that also provide a QueryState. +- * +- * Thrift is intrinsically synchronous so there could be only one query per +- * client at a given time. So ClientState and QueryState can be merge into the +- * same object. +- */ +-public class ThriftClientState extends ClientState +-{ +- private final QueryState queryState; +- +- public ThriftClientState(InetSocketAddress remoteAddress) +- { +- super(remoteAddress); +- this.queryState = new QueryState(this); +- } +- +- public QueryState getQueryState() +- { +- return queryState; +- } +- +- public String getSchedulingValue() +- { +- switch(DatabaseDescriptor.getRequestSchedulerId()) +- { +- case keyspace: return getRawKeyspace(); +- } +- return "default"; +- } +-} +diff --git a/src/java/org/apache/cassandra/thrift/ThriftConversion.java b/src/java/org/apache/cassandra/thrift/ThriftConversion.java +deleted file mode 100644 +index 35adddf..0000000 +--- a/src/java/org/apache/cassandra/thrift/ThriftConversion.java ++++ /dev/null +@@ -1,726 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. 
See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. +- */ +-package org.apache.cassandra.thrift; +- +-import java.util.*; +- +-import com.google.common.annotations.VisibleForTesting; +-import com.google.common.base.Strings; +-import com.google.common.collect.Maps; +- +-import org.apache.cassandra.config.*; +-import org.apache.cassandra.cql3.ColumnIdentifier; +-import org.apache.cassandra.cql3.Operator; +-import org.apache.cassandra.cql3.statements.IndexTarget; +-import org.apache.cassandra.db.CompactTables; +-import org.apache.cassandra.db.LegacyLayout; +-import org.apache.cassandra.db.WriteType; +-import org.apache.cassandra.db.compaction.AbstractCompactionStrategy; +-import org.apache.cassandra.db.filter.RowFilter; +-import org.apache.cassandra.db.marshal.*; +-import org.apache.cassandra.exceptions.*; +-import org.apache.cassandra.index.TargetParser; +-import org.apache.cassandra.io.compress.ICompressor; +-import org.apache.cassandra.locator.AbstractReplicationStrategy; +-import org.apache.cassandra.locator.LocalStrategy; +-import org.apache.cassandra.schema.*; +-import org.apache.cassandra.serializers.MarshalException; +-import org.apache.cassandra.utils.ByteBufferUtil; +-import org.apache.cassandra.utils.Pair; +-import org.apache.cassandra.utils.UUIDGen; +- +-/** +- * Static utility methods to convert internal 
structure to and from thrift ones. +- */ +-public class ThriftConversion +-{ +- public static org.apache.cassandra.db.ConsistencyLevel fromThrift(ConsistencyLevel cl) +- { +- switch (cl) +- { +- case ANY: return org.apache.cassandra.db.ConsistencyLevel.ANY; +- case ONE: return org.apache.cassandra.db.ConsistencyLevel.ONE; +- case TWO: return org.apache.cassandra.db.ConsistencyLevel.TWO; +- case THREE: return org.apache.cassandra.db.ConsistencyLevel.THREE; +- case QUORUM: return org.apache.cassandra.db.ConsistencyLevel.QUORUM; +- case ALL: return org.apache.cassandra.db.ConsistencyLevel.ALL; +- case LOCAL_QUORUM: return org.apache.cassandra.db.ConsistencyLevel.LOCAL_QUORUM; +- case EACH_QUORUM: return org.apache.cassandra.db.ConsistencyLevel.EACH_QUORUM; +- case SERIAL: return org.apache.cassandra.db.ConsistencyLevel.SERIAL; +- case LOCAL_SERIAL: return org.apache.cassandra.db.ConsistencyLevel.LOCAL_SERIAL; +- case LOCAL_ONE: return org.apache.cassandra.db.ConsistencyLevel.LOCAL_ONE; +- } +- throw new AssertionError(); +- } +- +- public static ConsistencyLevel toThrift(org.apache.cassandra.db.ConsistencyLevel cl) +- { +- switch (cl) +- { +- case ANY: return ConsistencyLevel.ANY; +- case ONE: return ConsistencyLevel.ONE; +- case TWO: return ConsistencyLevel.TWO; +- case THREE: return ConsistencyLevel.THREE; +- case QUORUM: return ConsistencyLevel.QUORUM; +- case ALL: return ConsistencyLevel.ALL; +- case LOCAL_QUORUM: return ConsistencyLevel.LOCAL_QUORUM; +- case EACH_QUORUM: return ConsistencyLevel.EACH_QUORUM; +- case SERIAL: return ConsistencyLevel.SERIAL; +- case LOCAL_SERIAL: return ConsistencyLevel.LOCAL_SERIAL; +- case LOCAL_ONE: return ConsistencyLevel.LOCAL_ONE; +- } +- throw new AssertionError(); +- } +- +- // We never return, but returning a RuntimeException allows to write "throw rethrow(e)" without java complaining +- // for methods that have a return value. 
+- public static RuntimeException rethrow(RequestExecutionException e) throws UnavailableException, TimedOutException +- { +- if (e instanceof RequestFailureException) +- throw toThrift((RequestFailureException)e); +- else if (e instanceof RequestTimeoutException) +- throw toThrift((RequestTimeoutException)e); +- else +- throw new UnavailableException(); +- } +- +- public static InvalidRequestException toThrift(RequestValidationException e) +- { +- return new InvalidRequestException(e.getMessage()); +- } +- +- public static UnavailableException toThrift(org.apache.cassandra.exceptions.UnavailableException e) +- { +- return new UnavailableException(); +- } +- +- public static AuthenticationException toThrift(org.apache.cassandra.exceptions.AuthenticationException e) +- { +- return new AuthenticationException(e.getMessage()); +- } +- +- public static TimedOutException toThrift(RequestTimeoutException e) +- { +- TimedOutException toe = new TimedOutException(); +- if (e instanceof WriteTimeoutException) +- { +- WriteTimeoutException wte = (WriteTimeoutException)e; +- toe.setAcknowledged_by(wte.received); +- if (wte.writeType == WriteType.BATCH_LOG) +- toe.setAcknowledged_by_batchlog(false); +- else if (wte.writeType == WriteType.BATCH) +- toe.setAcknowledged_by_batchlog(true); +- else if (wte.writeType == WriteType.CAS) +- toe.setPaxos_in_progress(true); +- } +- return toe; +- } +- +- // Thrift does not support RequestFailureExceptions, so we translate them into timeouts +- public static TimedOutException toThrift(RequestFailureException e) +- { +- return new TimedOutException(); +- } +- +- public static RowFilter rowFilterFromThrift(CFMetaData metadata, List exprs) +- { +- if (exprs == null || exprs.isEmpty()) +- return RowFilter.NONE; +- +- RowFilter converted = RowFilter.forThrift(exprs.size()); +- for (IndexExpression expr : exprs) +- converted.addThriftExpression(metadata, expr.column_name, Operator.valueOf(expr.op.name()), expr.value); +- return converted; +- } 
+- +- public static KeyspaceMetadata fromThrift(KsDef ksd, CFMetaData... cfDefs) throws ConfigurationException +- { +- Class cls = AbstractReplicationStrategy.getClass(ksd.strategy_class); +- if (cls.equals(LocalStrategy.class)) +- throw new ConfigurationException("Unable to use given strategy class: LocalStrategy is reserved for internal use."); +- +- Map replicationMap = new HashMap<>(); +- if (ksd.strategy_options != null) +- replicationMap.putAll(ksd.strategy_options); +- replicationMap.put(ReplicationParams.CLASS, cls.getName()); +- +- return KeyspaceMetadata.create(ksd.name, KeyspaceParams.create(ksd.durable_writes, replicationMap), Tables.of(cfDefs)); +- } +- +- public static KsDef toThrift(KeyspaceMetadata ksm) +- { +- List cfDefs = new ArrayList<>(); +- for (CFMetaData cfm : ksm.tables) // do not include views +- if (cfm.isThriftCompatible()) // Don't expose CF that cannot be correctly handle by thrift; see CASSANDRA-4377 for further details +- cfDefs.add(toThrift(cfm)); +- +- KsDef ksdef = new KsDef(ksm.name, ksm.params.replication.klass.getName(), cfDefs); +- ksdef.setStrategy_options(ksm.params.replication.options); +- ksdef.setDurable_writes(ksm.params.durableWrites); +- +- return ksdef; +- } +- +- public static CFMetaData fromThrift(CfDef cf_def) +- throws org.apache.cassandra.exceptions.InvalidRequestException, ConfigurationException +- { +- // This is a creation: the table is dense if it doesn't define any column_metadata +- boolean isDense = cf_def.column_metadata == null || cf_def.column_metadata.isEmpty(); +- return internalFromThrift(cf_def, true, Collections.emptyList(), isDense); +- } +- +- public static CFMetaData fromThriftForUpdate(CfDef cf_def, CFMetaData toUpdate) +- throws org.apache.cassandra.exceptions.InvalidRequestException, ConfigurationException +- { +- return internalFromThrift(cf_def, false, toUpdate.allColumns(), toUpdate.isDense()); +- } +- +- private static boolean isSuper(String thriftColumnType) +- throws 
org.apache.cassandra.exceptions.InvalidRequestException +- { +- switch (thriftColumnType.toLowerCase()) +- { +- case "standard": return false; +- case "super": return true; +- default: throw new org.apache.cassandra.exceptions.InvalidRequestException("Invalid column type " + thriftColumnType); +- } +- } +- +- /** +- * Convert a thrift CfDef. +- *

    , +- * This is used both for creation and update of CF. +- * +- * @param cf_def the thrift CfDef to convert. +- * @param isCreation whether that is a new table creation or not. +- * @param previousCQLMetadata if it is not a table creation, the previous +- * definitions of the tables (which we use to preserve the CQL metadata). +- * If it is a table creation, this will be empty. +- * @param isDense whether the table is dense or not. +- * +- * @return the converted table definition. +- */ +- private static CFMetaData internalFromThrift(CfDef cf_def, +- boolean isCreation, +- Collection previousCQLMetadata, +- boolean isDense) +- throws org.apache.cassandra.exceptions.InvalidRequestException, ConfigurationException +- { +- applyImplicitDefaults(cf_def); +- +- try +- { +- boolean isSuper = isSuper(cf_def.column_type); +- AbstractType rawComparator = TypeParser.parse(cf_def.comparator_type); +- AbstractType subComparator = isSuper +- ? cf_def.subcomparator_type == null ? BytesType.instance : TypeParser.parse(cf_def.subcomparator_type) +- : null; +- +- AbstractType keyValidator = cf_def.isSetKey_validation_class() ? 
TypeParser.parse(cf_def.key_validation_class) : BytesType.instance; +- AbstractType defaultValidator = TypeParser.parse(cf_def.default_validation_class); +- +- // Convert the definitions from the input CfDef +- List defs = fromThrift(cf_def.keyspace, cf_def.name, rawComparator, subComparator, cf_def.column_metadata); +- +- // Add the keyAlias if there is one, since that's a CQL metadata that thrift can actually change (for +- // historical reasons) +- boolean hasKeyAlias = cf_def.isSetKey_alias() && keyValidator != null && !(keyValidator instanceof CompositeType); +- if (hasKeyAlias) +- defs.add(ColumnDefinition.partitionKeyDef(cf_def.keyspace, cf_def.name, UTF8Type.instance.getString(cf_def.key_alias), keyValidator, 0)); +- +- // Now add any CQL metadata that we want to copy, skipping the keyAlias if there was one +- for (ColumnDefinition def : previousCQLMetadata) +- { +- // isPartOfCellName basically means 'is not just a CQL metadata' +- if (def.isPartOfCellName(false, isSuper)) +- continue; +- +- if (def.kind == ColumnDefinition.Kind.PARTITION_KEY && hasKeyAlias) +- continue; +- +- defs.add(def); +- } +- +- UUID cfId = Schema.instance.getId(cf_def.keyspace, cf_def.name); +- if (cfId == null) +- cfId = UUIDGen.getTimeUUID(); +- +- boolean isCompound = !isSuper && (rawComparator instanceof CompositeType); +- boolean isCounter = defaultValidator instanceof CounterColumnType; +- +- // If it's a thrift table creation, adds the default CQL metadata for the new table +- if (isCreation) +- { +- addDefaultCQLMetadata(defs, +- cf_def.keyspace, +- cf_def.name, +- hasKeyAlias ? 
null : keyValidator, +- rawComparator, +- subComparator, +- defaultValidator); +- } +- +- // We do not allow Thrift views, so we always set it to false +- boolean isView = false; +- +- CFMetaData newCFMD = CFMetaData.create(cf_def.keyspace, +- cf_def.name, +- cfId, +- isDense, +- isCompound, +- isSuper, +- isCounter, +- isView, +- defs, +- DatabaseDescriptor.getPartitioner()); +- +- // Convert any secondary indexes defined in the thrift column_metadata +- newCFMD.indexes(indexDefsFromThrift(newCFMD, +- cf_def.keyspace, +- cf_def.name, +- rawComparator, +- subComparator, +- cf_def.column_metadata)); +- +- if (cf_def.isSetGc_grace_seconds()) +- newCFMD.gcGraceSeconds(cf_def.gc_grace_seconds); +- +- newCFMD.compaction(compactionParamsFromThrift(cf_def)); +- +- if (cf_def.isSetBloom_filter_fp_chance()) +- newCFMD.bloomFilterFpChance(cf_def.bloom_filter_fp_chance); +- if (cf_def.isSetMemtable_flush_period_in_ms()) +- newCFMD.memtableFlushPeriod(cf_def.memtable_flush_period_in_ms); +- if (cf_def.isSetCaching() || cf_def.isSetCells_per_row_to_cache()) +- newCFMD.caching(cachingFromThrift(cf_def.caching, cf_def.cells_per_row_to_cache)); +- if (cf_def.isSetRead_repair_chance()) +- newCFMD.readRepairChance(cf_def.read_repair_chance); +- if (cf_def.isSetDefault_time_to_live()) +- newCFMD.defaultTimeToLive(cf_def.default_time_to_live); +- if (cf_def.isSetDclocal_read_repair_chance()) +- newCFMD.dcLocalReadRepairChance(cf_def.dclocal_read_repair_chance); +- if (cf_def.isSetMin_index_interval()) +- newCFMD.minIndexInterval(cf_def.min_index_interval); +- if (cf_def.isSetMax_index_interval()) +- newCFMD.maxIndexInterval(cf_def.max_index_interval); +- if (cf_def.isSetSpeculative_retry()) +- newCFMD.speculativeRetry(SpeculativeRetryParam.fromString(cf_def.speculative_retry)); +- if (cf_def.isSetTriggers()) +- newCFMD.triggers(triggerDefinitionsFromThrift(cf_def.triggers)); +- if (cf_def.isSetComment()) +- newCFMD.comment(cf_def.comment); +- if (cf_def.isSetCompression_options()) +- 
newCFMD.compression(compressionParametersFromThrift(cf_def.compression_options)); +- +- return newCFMD; +- } +- catch (SyntaxException | MarshalException e) +- { +- throw new ConfigurationException(e.getMessage()); +- } +- } +- +- @SuppressWarnings("unchecked") +- private static CompactionParams compactionParamsFromThrift(CfDef cf_def) +- { +- Class klass = +- CFMetaData.createCompactionStrategy(cf_def.compaction_strategy); +- Map options = new HashMap<>(cf_def.compaction_strategy_options); +- +- int minThreshold = cf_def.min_compaction_threshold; +- int maxThreshold = cf_def.max_compaction_threshold; +- +- if (CompactionParams.supportsThresholdParams(klass)) +- { +- options.putIfAbsent(CompactionParams.Option.MIN_THRESHOLD.toString(), Integer.toString(minThreshold)); +- options.putIfAbsent(CompactionParams.Option.MAX_THRESHOLD.toString(), Integer.toString(maxThreshold)); +- } +- +- return CompactionParams.create(klass, options); +- } +- +- private static CompressionParams compressionParametersFromThrift(Map compression_options) +- { +- CompressionParams compressionParameter = CompressionParams.fromMap(compression_options); +- compressionParameter.validate(); +- return compressionParameter; +- } +- +- private static void addDefaultCQLMetadata(Collection defs, +- String ks, +- String cf, +- AbstractType keyValidator, +- AbstractType comparator, +- AbstractType subComparator, +- AbstractType defaultValidator) +- { +- CompactTables.DefaultNames names = CompactTables.defaultNameGenerator(defs); +- if (keyValidator != null) +- { +- if (keyValidator instanceof CompositeType) +- { +- List> subTypes = ((CompositeType)keyValidator).types; +- for (int i = 0; i < subTypes.size(); i++) +- defs.add(ColumnDefinition.partitionKeyDef(ks, cf, names.defaultPartitionKeyName(), subTypes.get(i), i)); +- } +- else +- { +- defs.add(ColumnDefinition.partitionKeyDef(ks, cf, names.defaultPartitionKeyName(), keyValidator, 0)); +- } +- } +- +- if (subComparator != null) +- { +- // SuperColumn 
tables: we use a special map to hold dynamic values within a given super column +- defs.add(ColumnDefinition.clusteringDef(ks, cf, names.defaultClusteringName(), comparator, 0)); +- defs.add(ColumnDefinition.regularDef(ks, cf, CompactTables.SUPER_COLUMN_MAP_COLUMN_STR, MapType.getInstance(subComparator, defaultValidator, true))); +- } +- else +- { +- List> subTypes = comparator instanceof CompositeType +- ? ((CompositeType)comparator).types +- : Collections.>singletonList(comparator); +- +- for (int i = 0; i < subTypes.size(); i++) +- defs.add(ColumnDefinition.clusteringDef(ks, cf, names.defaultClusteringName(), subTypes.get(i), i)); +- +- defs.add(ColumnDefinition.regularDef(ks, cf, names.defaultCompactValueName(), defaultValidator)); +- } +- } +- +- /* applies implicit defaults to cf definition. useful in updates */ +- @SuppressWarnings("deprecation") +- private static void applyImplicitDefaults(org.apache.cassandra.thrift.CfDef cf_def) +- { +- if (!cf_def.isSetComment()) +- cf_def.setComment(""); +- if (!cf_def.isSetMin_compaction_threshold()) +- cf_def.setMin_compaction_threshold(CompactionParams.DEFAULT_MIN_THRESHOLD); +- if (!cf_def.isSetMax_compaction_threshold()) +- cf_def.setMax_compaction_threshold(CompactionParams.DEFAULT_MAX_THRESHOLD); +- if (!cf_def.isSetCompaction_strategy()) +- cf_def.setCompaction_strategy(CompactionParams.DEFAULT.klass().getSimpleName()); +- if (!cf_def.isSetCompaction_strategy_options()) +- cf_def.setCompaction_strategy_options(Collections.emptyMap()); +- if (!cf_def.isSetCompression_options()) +- cf_def.setCompression_options(Collections.singletonMap(CompressionParams.SSTABLE_COMPRESSION, CompressionParams.DEFAULT.klass().getCanonicalName())); +- if (!cf_def.isSetDefault_time_to_live()) +- cf_def.setDefault_time_to_live(TableParams.DEFAULT_DEFAULT_TIME_TO_LIVE); +- if (!cf_def.isSetDclocal_read_repair_chance()) +- cf_def.setDclocal_read_repair_chance(TableParams.DEFAULT_DCLOCAL_READ_REPAIR_CHANCE); +- +- // if index_interval was 
set, use that for the min_index_interval default +- if (!cf_def.isSetMin_index_interval()) +- { +- if (cf_def.isSetIndex_interval()) +- cf_def.setMin_index_interval(cf_def.getIndex_interval()); +- else +- cf_def.setMin_index_interval(TableParams.DEFAULT_MIN_INDEX_INTERVAL); +- } +- +- if (!cf_def.isSetMax_index_interval()) +- { +- // ensure the max is at least as large as the min +- cf_def.setMax_index_interval(Math.max(cf_def.min_index_interval, TableParams.DEFAULT_MAX_INDEX_INTERVAL)); +- } +- } +- +- public static CfDef toThrift(CFMetaData cfm) +- { +- CfDef def = new CfDef(cfm.ksName, cfm.cfName); +- def.setColumn_type(cfm.isSuper() ? "Super" : "Standard"); +- +- if (cfm.isSuper()) +- { +- def.setComparator_type(cfm.comparator.subtype(0).toString()); +- def.setSubcomparator_type(cfm.thriftColumnNameType().toString()); +- } +- else +- { +- def.setComparator_type(LegacyLayout.makeLegacyComparator(cfm).toString()); +- } +- +- def.setComment(cfm.params.comment); +- def.setRead_repair_chance(cfm.params.readRepairChance); +- def.setDclocal_read_repair_chance(cfm.params.dcLocalReadRepairChance); +- def.setGc_grace_seconds(cfm.params.gcGraceSeconds); +- def.setDefault_validation_class(cfm.makeLegacyDefaultValidator().toString()); +- def.setKey_validation_class(cfm.getKeyValidator().toString()); +- def.setMin_compaction_threshold(cfm.params.compaction.minCompactionThreshold()); +- def.setMax_compaction_threshold(cfm.params.compaction.maxCompactionThreshold()); +- // We only return the alias if only one is set since thrift don't know about multiple key aliases +- if (cfm.partitionKeyColumns().size() == 1) +- def.setKey_alias(cfm.partitionKeyColumns().get(0).name.bytes); +- def.setColumn_metadata(columnDefinitionsToThrift(cfm, cfm.allColumns())); +- def.setCompaction_strategy(cfm.params.compaction.klass().getName()); +- def.setCompaction_strategy_options(cfm.params.compaction.options()); +- def.setCompression_options(compressionParametersToThrift(cfm.params.compression)); 
+- def.setBloom_filter_fp_chance(cfm.params.bloomFilterFpChance); +- def.setMin_index_interval(cfm.params.minIndexInterval); +- def.setMax_index_interval(cfm.params.maxIndexInterval); +- def.setMemtable_flush_period_in_ms(cfm.params.memtableFlushPeriodInMs); +- def.setCaching(toThrift(cfm.params.caching)); +- def.setCells_per_row_to_cache(toThriftCellsPerRow(cfm.params.caching)); +- def.setDefault_time_to_live(cfm.params.defaultTimeToLive); +- def.setSpeculative_retry(cfm.params.speculativeRetry.toString()); +- def.setTriggers(triggerDefinitionsToThrift(cfm.getTriggers())); +- +- return def; +- } +- +- public static ColumnDefinition fromThrift(String ksName, +- String cfName, +- AbstractType thriftComparator, +- AbstractType thriftSubcomparator, +- ColumnDef thriftColumnDef) +- throws SyntaxException, ConfigurationException +- { +- boolean isSuper = thriftSubcomparator != null; +- // For super columns, the componentIndex is 1 because the ColumnDefinition applies to the column component. +- AbstractType comparator = thriftSubcomparator == null ? thriftComparator : thriftSubcomparator; +- try +- { +- comparator.validate(thriftColumnDef.name); +- } +- catch (MarshalException e) +- { +- throw new ConfigurationException(String.format("Column name %s is not valid for comparator %s", ByteBufferUtil.bytesToHex(thriftColumnDef.name), comparator)); +- } +- +- // In our generic layout, we store thrift defined columns as static, but this doesn't work for super columns so we +- // use a regular definition (and "dynamic" columns are handled in a map). +- ColumnDefinition.Kind kind = isSuper ? 
ColumnDefinition.Kind.REGULAR : ColumnDefinition.Kind.STATIC; +- return new ColumnDefinition(ksName, +- cfName, +- ColumnIdentifier.getInterned(ByteBufferUtil.clone(thriftColumnDef.name), comparator), +- TypeParser.parse(thriftColumnDef.validation_class), +- ColumnDefinition.NO_POSITION, +- kind); +- } +- +- private static List fromThrift(String ksName, +- String cfName, +- AbstractType thriftComparator, +- AbstractType thriftSubcomparator, +- List thriftDefs) +- throws SyntaxException, ConfigurationException +- { +- if (thriftDefs == null) +- return new ArrayList<>(); +- +- List defs = new ArrayList<>(thriftDefs.size()); +- for (ColumnDef thriftColumnDef : thriftDefs) +- defs.add(fromThrift(ksName, cfName, thriftComparator, thriftSubcomparator, thriftColumnDef)); +- +- return defs; +- } +- +- private static Indexes indexDefsFromThrift(CFMetaData cfm, +- String ksName, +- String cfName, +- AbstractType thriftComparator, +- AbstractType thriftSubComparator, +- List thriftDefs) +- { +- if (thriftDefs == null) +- return Indexes.none(); +- +- Set indexNames = new HashSet<>(); +- Indexes.Builder indexes = Indexes.builder(); +- for (ColumnDef def : thriftDefs) +- { +- if (def.isSetIndex_type()) +- { +- ColumnDefinition column = fromThrift(ksName, cfName, thriftComparator, thriftSubComparator, def); +- +- String indexName = def.getIndex_name(); +- // add a generated index name if none was supplied +- if (Strings.isNullOrEmpty(indexName)) +- indexName = Indexes.getAvailableIndexName(ksName, cfName, column.name.toString()); +- +- if (indexNames.contains(indexName)) +- throw new ConfigurationException("Duplicate index name " + indexName); +- +- indexNames.add(indexName); +- +- Map indexOptions = def.getIndex_options(); +- if (indexOptions != null && indexOptions.containsKey(IndexTarget.TARGET_OPTION_NAME)) +- throw new ConfigurationException("Reserved index option 'target' cannot be used"); +- +- IndexMetadata.Kind kind = IndexMetadata.Kind.valueOf(def.index_type.name()); +- 
+- indexes.add(IndexMetadata.fromLegacyMetadata(cfm, column, indexName, kind, indexOptions)); +- } +- } +- return indexes.build(); +- } +- +- @VisibleForTesting +- public static ColumnDef toThrift(CFMetaData cfMetaData, ColumnDefinition column) +- { +- ColumnDef cd = new ColumnDef(); +- +- cd.setName(ByteBufferUtil.clone(column.name.bytes)); +- cd.setValidation_class(column.type.toString()); +- +- // we include the index in the ColumnDef iff its targets are compatible with +- // pre-3.0 indexes AND it is the only index defined on the given column, that is: +- // * it is the only index on the column (i.e. with this column as its target) +- // * it has only a single target, which matches the pattern for pre-3.0 indexes +- // i.e. keys/values/entries/full, with exactly 1 argument that matches the +- // column name OR a simple column name (for indexes on non-collection columns) +- // n.b. it's a guess that using a pre-compiled regex and checking the group is +- // cheaper than compiling a new regex for each column, but as this isn't on +- // any hot path this hasn't been verified yet. +- IndexMetadata matchedIndex = null; +- for (IndexMetadata index : cfMetaData.getIndexes()) +- { +- Pair target = TargetParser.parse(cfMetaData, index); +- if (target.left.equals(column)) +- { +- // we already found an index for this column, we've no option but to +- // ignore both of them (and any others we've yet to find) +- if (matchedIndex != null) +- return cd; +- +- matchedIndex = index; +- } +- } +- +- if (matchedIndex != null) +- { +- cd.setIndex_type(org.apache.cassandra.thrift.IndexType.valueOf(matchedIndex.kind.name())); +- cd.setIndex_name(matchedIndex.name); +- Map filteredOptions = Maps.filterKeys(matchedIndex.options, +- s -> !IndexTarget.TARGET_OPTION_NAME.equals(s)); +- cd.setIndex_options(filteredOptions.isEmpty() +- ? 
null +- : Maps.newHashMap(filteredOptions)); +- } +- +- return cd; +- } +- +- private static List columnDefinitionsToThrift(CFMetaData metadata, Collection columns) +- { +- List thriftDefs = new ArrayList<>(columns.size()); +- for (ColumnDefinition def : columns) +- if (def.isPartOfCellName(metadata.isCQLTable(), metadata.isSuper())) +- thriftDefs.add(ThriftConversion.toThrift(metadata, def)); +- return thriftDefs; +- } +- +- private static Triggers triggerDefinitionsFromThrift(List thriftDefs) +- { +- Triggers.Builder triggers = Triggers.builder(); +- for (TriggerDef thriftDef : thriftDefs) +- triggers.add(new TriggerMetadata(thriftDef.getName(), thriftDef.getOptions().get(TriggerMetadata.CLASS))); +- return triggers.build(); +- } +- +- private static List triggerDefinitionsToThrift(Triggers triggers) +- { +- List thriftDefs = new ArrayList<>(); +- for (TriggerMetadata def : triggers) +- { +- TriggerDef td = new TriggerDef(); +- td.setName(def.name); +- td.setOptions(Collections.singletonMap(TriggerMetadata.CLASS, def.classOption)); +- thriftDefs.add(td); +- } +- return thriftDefs; +- } +- +- @SuppressWarnings("deprecation") +- public static Map compressionParametersToThrift(CompressionParams parameters) +- { +- if (!parameters.isEnabled()) +- return Collections.emptyMap(); +- +- Map options = new HashMap<>(parameters.getOtherOptions()); +- Class klass = parameters.getSstableCompressor().getClass(); +- options.put(CompressionParams.SSTABLE_COMPRESSION, klass.getName()); +- options.put(CompressionParams.CHUNK_LENGTH_KB, parameters.chunkLengthInKB()); +- return options; +- } +- +- private static String toThrift(CachingParams caching) +- { +- if (caching.cacheRows() && caching.cacheKeys()) +- return "ALL"; +- +- if (caching.cacheRows()) +- return "ROWS_ONLY"; +- +- if (caching.cacheKeys()) +- return "KEYS_ONLY"; +- +- return "NONE"; +- } +- +- private static CachingParams cachingFromTrhfit(String caching) +- { +- switch (caching.toUpperCase()) +- { +- case "ALL": +- 
return CachingParams.CACHE_EVERYTHING; +- case "ROWS_ONLY": +- return new CachingParams(false, Integer.MAX_VALUE); +- case "KEYS_ONLY": +- return CachingParams.CACHE_KEYS; +- case "NONE": +- return CachingParams.CACHE_NOTHING; +- default: +- throw new ConfigurationException(String.format("Invalid value %s for caching parameter", caching)); +- } +- } +- +- private static String toThriftCellsPerRow(CachingParams caching) +- { +- return caching.cacheAllRows() +- ? "ALL" +- : String.valueOf(caching.rowsPerPartitionToCache()); +- } +- +- private static int fromThriftCellsPerRow(String value) +- { +- return "ALL".equals(value) +- ? Integer.MAX_VALUE +- : Integer.parseInt(value); +- } +- +- public static CachingParams cachingFromThrift(String caching, String cellsPerRow) +- { +- boolean cacheKeys = true; +- int rowsPerPartitionToCache = 0; +- +- // if we get a caching string from thrift it is legacy, "ALL", "KEYS_ONLY" etc +- if (caching != null) +- { +- CachingParams parsed = cachingFromTrhfit(caching); +- cacheKeys = parsed.cacheKeys(); +- rowsPerPartitionToCache = parsed.rowsPerPartitionToCache(); +- } +- +- // if we get cells_per_row from thrift, it is either "ALL" or "". +- if (cellsPerRow != null && rowsPerPartitionToCache > 0) +- rowsPerPartitionToCache = fromThriftCellsPerRow(cellsPerRow); +- +- return new CachingParams(cacheKeys, rowsPerPartitionToCache); +- } +-} +diff --git a/src/java/org/apache/cassandra/thrift/ThriftResultsMerger.java b/src/java/org/apache/cassandra/thrift/ThriftResultsMerger.java +deleted file mode 100644 +index a14409e..0000000 +--- a/src/java/org/apache/cassandra/thrift/ThriftResultsMerger.java ++++ /dev/null +@@ -1,294 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. 
The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. +- */ +-package org.apache.cassandra.thrift; +- +-import java.util.Collections; +-import java.util.Iterator; +-import java.util.NoSuchElementException; +- +-import org.apache.cassandra.db.transform.Transformation; +-import org.apache.cassandra.utils.AbstractIterator; +-import com.google.common.collect.Iterators; +-import com.google.common.collect.PeekingIterator; +- +-import org.apache.cassandra.config.CFMetaData; +-import org.apache.cassandra.config.ColumnDefinition; +-import org.apache.cassandra.db.*; +-import org.apache.cassandra.db.rows.*; +-import org.apache.cassandra.db.marshal.AbstractType; +-import org.apache.cassandra.db.marshal.MapType; +-import org.apache.cassandra.db.partitions.*; +- +-/** +- * Given an iterator on a partition of a compact table, this return an iterator that merges the +- * static row columns with the other results. +- * +- * Compact tables stores thrift column_metadata as static columns (see CompactTables for +- * details). When reading for thrift however, we want to merge those static values with other +- * results because: +- * 1) on thrift, all "columns" are sorted together, whether or not they are declared +- * column_metadata. +- * 2) it's possible that a table add a value for a "dynamic" column, and later that column +- * is statically defined. 
Merging "static" and "dynamic" columns make sure we don't miss +- * a value prior to the column declaration. +- * +- * For example, if a thrift table declare 2 columns "c1" and "c5" and the results from a query +- * is: +- * Partition: static: { c1: 3, c5: 4 } +- * "a" : { value : 2 } +- * "c3": { value : 8 } +- * "c7": { value : 1 } +- * then this class transform it into: +- * Partition: "a" : { value : 2 } +- * "c1": { value : 3 } +- * "c3": { value : 8 } +- * "c5": { value : 4 } +- * "c7": { value : 1 } +- */ +-public class ThriftResultsMerger extends Transformation +-{ +- private final int nowInSec; +- +- private ThriftResultsMerger(int nowInSec) +- { +- this.nowInSec = nowInSec; +- } +- +- public static UnfilteredPartitionIterator maybeWrap(UnfilteredPartitionIterator iterator, CFMetaData metadata, int nowInSec) +- { +- if (!metadata.isStaticCompactTable() && !metadata.isSuper()) +- return iterator; +- +- return Transformation.apply(iterator, new ThriftResultsMerger(nowInSec)); +- } +- +- public static UnfilteredRowIterator maybeWrap(UnfilteredRowIterator iterator, int nowInSec) +- { +- if (!iterator.metadata().isStaticCompactTable() && !iterator.metadata().isSuper()) +- return iterator; +- +- return iterator.metadata().isSuper() +- ? Transformation.apply(iterator, new SuperColumnsPartitionMerger(iterator, nowInSec)) +- : new PartitionMerger(iterator, nowInSec); +- } +- +- @Override +- public UnfilteredRowIterator applyToPartition(UnfilteredRowIterator iter) +- { +- return iter.metadata().isSuper() +- ? Transformation.apply(iter, new SuperColumnsPartitionMerger(iter, nowInSec)) +- : new PartitionMerger(iter, nowInSec); +- } +- +- private static class PartitionMerger extends WrappingUnfilteredRowIterator +- { +- private final int nowInSec; +- +- // We initialize lazily to avoid having this iterator fetch the wrapped iterator before it's actually asked for it. 
+- private boolean isInit; +- +- private Iterator staticCells; +- +- private final Row.Builder builder; +- private Row nextToMerge; +- private Unfiltered nextFromWrapped; +- +- private PartitionMerger(UnfilteredRowIterator results, int nowInSec) +- { +- super(results); +- assert results.metadata().isStaticCompactTable(); +- this.nowInSec = nowInSec; +- this.builder = BTreeRow.sortedBuilder(); +- } +- +- private void init() +- { +- assert !isInit; +- Row staticRow = super.staticRow(); +- assert !staticRow.hasComplex(); +- +- staticCells = staticRow.cells().iterator(); +- updateNextToMerge(); +- isInit = true; +- } +- +- @Override +- public Row staticRow() +- { +- return Rows.EMPTY_STATIC_ROW; +- } +- +- @Override +- public boolean hasNext() +- { +- if (!isInit) +- init(); +- +- return nextFromWrapped != null || nextToMerge != null || super.hasNext(); +- } +- +- @Override +- public Unfiltered next() +- { +- if (!isInit) +- init(); +- +- if (nextFromWrapped == null && super.hasNext()) +- nextFromWrapped = super.next(); +- +- if (nextFromWrapped == null) +- { +- if (nextToMerge == null) +- throw new NoSuchElementException(); +- +- return consumeNextToMerge(); +- } +- +- if (nextToMerge == null) +- return consumeNextWrapped(); +- +- int cmp = metadata().comparator.compare(nextToMerge, nextFromWrapped); +- if (cmp < 0) +- return consumeNextToMerge(); +- if (cmp > 0) +- return consumeNextWrapped(); +- +- // Same row, so merge them +- assert nextFromWrapped instanceof Row; +- return Rows.merge((Row)consumeNextWrapped(), consumeNextToMerge(), nowInSec); +- } +- +- private Unfiltered consumeNextWrapped() +- { +- Unfiltered toReturn = nextFromWrapped; +- nextFromWrapped = null; +- return toReturn; +- } +- +- private Row consumeNextToMerge() +- { +- Row toReturn = nextToMerge; +- updateNextToMerge(); +- return toReturn; +- } +- +- private void updateNextToMerge() +- { +- if (!staticCells.hasNext()) +- { +- // Nothing more to merge. 
+- nextToMerge = null; +- return; +- } +- +- Cell cell = staticCells.next(); +- +- // Given a static cell, the equivalent row uses the column name as clustering and the value as unique cell value. +- builder.newRow(Clustering.make(cell.column().name.bytes)); +- builder.addCell(new BufferCell(metadata().compactValueColumn(), cell.timestamp(), cell.ttl(), cell.localDeletionTime(), cell.value(), cell.path())); +- nextToMerge = builder.build(); +- } +- } +- +- private static class SuperColumnsPartitionMerger extends Transformation +- { +- private final int nowInSec; +- private final Row.Builder builder; +- private final ColumnDefinition superColumnMapColumn; +- private final AbstractType columnComparator; +- +- private SuperColumnsPartitionMerger(UnfilteredRowIterator applyTo, int nowInSec) +- { +- assert applyTo.metadata().isSuper(); +- this.nowInSec = nowInSec; +- +- this.superColumnMapColumn = applyTo.metadata().compactValueColumn(); +- assert superColumnMapColumn != null && superColumnMapColumn.type instanceof MapType; +- +- this.builder = BTreeRow.sortedBuilder(); +- this.columnComparator = ((MapType)superColumnMapColumn.type).nameComparator(); +- } +- +- @Override +- public Row applyToRow(Row row) +- { +- PeekingIterator staticCells = Iterators.peekingIterator(simpleCellsIterator(row)); +- if (!staticCells.hasNext()) +- return row; +- +- builder.newRow(row.clustering()); +- +- ComplexColumnData complexData = row.getComplexColumnData(superColumnMapColumn); +- +- PeekingIterator dynamicCells; +- if (complexData == null) +- { +- dynamicCells = Iterators.peekingIterator(Collections.emptyIterator()); +- } +- else +- { +- dynamicCells = Iterators.peekingIterator(complexData.iterator()); +- builder.addComplexDeletion(superColumnMapColumn, complexData.complexDeletion()); +- } +- +- while (staticCells.hasNext() && dynamicCells.hasNext()) +- { +- Cell staticCell = staticCells.peek(); +- Cell dynamicCell = dynamicCells.peek(); +- int cmp = 
columnComparator.compare(staticCell.column().name.bytes, dynamicCell.path().get(0)); +- if (cmp < 0) +- builder.addCell(makeDynamicCell(staticCells.next())); +- else if (cmp > 0) +- builder.addCell(dynamicCells.next()); +- else +- builder.addCell(Cells.reconcile(makeDynamicCell(staticCells.next()), dynamicCells.next(), nowInSec)); +- } +- +- while (staticCells.hasNext()) +- builder.addCell(makeDynamicCell(staticCells.next())); +- while (dynamicCells.hasNext()) +- builder.addCell(dynamicCells.next()); +- +- return builder.build(); +- } +- +- private Cell makeDynamicCell(Cell staticCell) +- { +- return new BufferCell(superColumnMapColumn, staticCell.timestamp(), staticCell.ttl(), staticCell.localDeletionTime(), staticCell.value(), CellPath.create(staticCell.column().name.bytes)); +- } +- +- private Iterator simpleCellsIterator(Row row) +- { +- final Iterator cells = row.cells().iterator(); +- return new AbstractIterator() +- { +- protected Cell computeNext() +- { +- if (cells.hasNext()) +- { +- Cell cell = cells.next(); +- if (cell.column().isSimple()) +- return cell; +- } +- return endOfData(); +- } +- }; +- } +- } +-} +- +diff --git a/src/java/org/apache/cassandra/thrift/ThriftServer.java b/src/java/org/apache/cassandra/thrift/ThriftServer.java +deleted file mode 100644 +index 44ec524..0000000 +--- a/src/java/org/apache/cassandra/thrift/ThriftServer.java ++++ /dev/null +@@ -1,146 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. 
You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. +- */ +-package org.apache.cassandra.thrift; +- +-import java.net.InetAddress; +-import java.net.InetSocketAddress; +- +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-import org.apache.cassandra.service.CassandraDaemon; +-import org.apache.cassandra.config.DatabaseDescriptor; +-import org.apache.thrift.TProcessor; +-import org.apache.thrift.protocol.TBinaryProtocol; +-import org.apache.thrift.server.TServer; +-import org.apache.thrift.transport.TFramedTransport; +-import org.apache.thrift.transport.TTransportFactory; +- +-public class ThriftServer implements CassandraDaemon.Server +-{ +- private static Logger logger = LoggerFactory.getLogger(ThriftServer.class); +- public final static String SYNC = "sync"; +- public final static String ASYNC = "async"; +- public final static String HSHA = "hsha"; +- +- protected final InetAddress address; +- protected final int port; +- protected final int backlog; +- private volatile ThriftServerThread server; +- +- public ThriftServer(InetAddress address, int port, int backlog) +- { +- this.address = address; +- this.port = port; +- this.backlog = backlog; +- } +- +- public void start() +- { +- if (server == null) +- { +- CassandraServer iface = getCassandraServer(); +- server = new ThriftServerThread(address, port, backlog, getProcessor(iface), getTransportFactory()); +- server.start(); +- } +- } +- +- public synchronized void stop() +- { +- if (server != null) +- { +- server.stopServer(); +- try +- { +- server.join(); +- } +- catch (InterruptedException e) +- { +- logger.error("Interrupted 
while waiting thrift server to stop", e); +- } +- server = null; +- } +- } +- +- public boolean isRunning() +- { +- return server != null; +- } +- +- /* +- * These methods are intended to be overridden to provide custom implementations. +- */ +- protected CassandraServer getCassandraServer() +- { +- return new CassandraServer(); +- } +- +- protected TProcessor getProcessor(CassandraServer server) +- { +- return new Cassandra.Processor(server); +- } +- +- protected TTransportFactory getTransportFactory() +- { +- int tFramedTransportSize = DatabaseDescriptor.getThriftFramedTransportSize(); +- return new TFramedTransport.Factory(tFramedTransportSize); +- } +- +- /** +- * Simple class to run the thrift connection accepting code in separate +- * thread of control. +- */ +- private static class ThriftServerThread extends Thread +- { +- private final TServer serverEngine; +- +- public ThriftServerThread(InetAddress listenAddr, +- int listenPort, +- int listenBacklog, +- TProcessor processor, +- TTransportFactory transportFactory) +- { +- // now we start listening for clients +- logger.info(String.format("Binding thrift service to %s:%s", listenAddr, listenPort)); +- +- TServerFactory.Args args = new TServerFactory.Args(); +- args.tProtocolFactory = new TBinaryProtocol.Factory(true, true); +- args.addr = new InetSocketAddress(listenAddr, listenPort); +- args.listenBacklog = listenBacklog; +- args.processor = processor; +- args.keepAlive = DatabaseDescriptor.getRpcKeepAlive(); +- args.sendBufferSize = DatabaseDescriptor.getRpcSendBufferSize(); +- args.recvBufferSize = DatabaseDescriptor.getRpcRecvBufferSize(); +- args.inTransportFactory = transportFactory; +- args.outTransportFactory = transportFactory; +- serverEngine = new TServerCustomFactory(DatabaseDescriptor.getRpcServerType()).buildTServer(args); +- } +- +- public void run() +- { +- logger.info("Listening for thrift clients..."); +- serverEngine.serve(); +- } +- +- public void stopServer() +- { +- logger.info("Stop 
listening to thrift clients"); +- serverEngine.stop(); +- } +- } +-} +diff --git a/src/java/org/apache/cassandra/thrift/ThriftSessionManager.java b/src/java/org/apache/cassandra/thrift/ThriftSessionManager.java +deleted file mode 100644 +index 3603ad5..0000000 +--- a/src/java/org/apache/cassandra/thrift/ThriftSessionManager.java ++++ /dev/null +@@ -1,85 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. +- */ +-package org.apache.cassandra.thrift; +- +-import java.net.InetSocketAddress; +-import java.net.SocketAddress; +-import java.util.Map; +-import java.util.concurrent.ConcurrentHashMap; +- +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-/** +- * Encapsulates the current client state (session). +- * +- * We rely on the Thrift server to tell us what socket it is +- * executing a request for via setCurrentSocket, after which currentSession can do its job anywhere. 
+- */ +-public class ThriftSessionManager +-{ +- private static final Logger logger = LoggerFactory.getLogger(ThriftSessionManager.class); +- public final static ThriftSessionManager instance = new ThriftSessionManager(); +- +- private final ThreadLocal remoteSocket = new ThreadLocal<>(); +- private final ConcurrentHashMap activeSocketSessions = new ConcurrentHashMap<>(); +- +- /** +- * @param socket the address on which the current thread will work on requests for until further notice +- */ +- public void setCurrentSocket(SocketAddress socket) +- { +- remoteSocket.set(socket); +- } +- +- /** +- * @return the current session for the most recently given socket on this thread +- */ +- public ThriftClientState currentSession() +- { +- SocketAddress socket = remoteSocket.get(); +- assert socket != null; +- +- ThriftClientState cState = activeSocketSessions.get(socket); +- if (cState == null) +- { +- //guarantee atomicity +- ThriftClientState newState = new ThriftClientState((InetSocketAddress)socket); +- cState = activeSocketSessions.putIfAbsent(socket, newState); +- if (cState == null) +- cState = newState; +- } +- return cState; +- } +- +- /** +- * The connection associated with @param socket is permanently finished. +- */ +- public void connectionComplete(SocketAddress socket) +- { +- assert socket != null; +- activeSocketSessions.remove(socket); +- if (logger.isTraceEnabled()) +- logger.trace("ClientState removed for socket addr {}", socket); +- } +- +- public int getConnectedClients() +- { +- return activeSocketSessions.size(); +- } +-} +diff --git a/src/java/org/apache/cassandra/thrift/ThriftValidation.java b/src/java/org/apache/cassandra/thrift/ThriftValidation.java +deleted file mode 100644 +index be3e489..0000000 +--- a/src/java/org/apache/cassandra/thrift/ThriftValidation.java ++++ /dev/null +@@ -1,671 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. 
See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. +- */ +-package org.apache.cassandra.thrift; +- +-import java.nio.ByteBuffer; +-import java.util.Arrays; +-import java.util.Comparator; +-import java.util.List; +- +-import org.slf4j.Logger; +-import org.slf4j.LoggerFactory; +- +-import org.apache.cassandra.config.CFMetaData; +-import org.apache.cassandra.config.ColumnDefinition; +-import org.apache.cassandra.config.Schema; +-import org.apache.cassandra.cql3.Attributes; +-import org.apache.cassandra.cql3.Operator; +-import org.apache.cassandra.db.*; +-import org.apache.cassandra.db.marshal.AbstractType; +-import org.apache.cassandra.dht.IPartitioner; +-import org.apache.cassandra.dht.Token; +-import org.apache.cassandra.index.Index; +-import org.apache.cassandra.index.SecondaryIndexManager; +-import org.apache.cassandra.serializers.MarshalException; +-import org.apache.cassandra.utils.ByteBufferUtil; +-import org.apache.cassandra.utils.FBUtilities; +- +-/** +- * This has a lot of building blocks for CassandraServer to call to make sure it has valid input +- * -- ensuring column names conform to the declared comparator, for instance. 
+- * +- * The methods here mostly try to do just one part of the validation so they can be combined +- * for different needs -- supercolumns vs regular, range slices vs named, batch vs single-column. +- * (ValidateColumnPath is the main exception in that it includes keyspace and CF validation.) +- */ +-public class ThriftValidation +-{ +- private static final Logger logger = LoggerFactory.getLogger(ThriftValidation.class); +- +- public static void validateKey(CFMetaData metadata, ByteBuffer key) throws org.apache.cassandra.exceptions.InvalidRequestException +- { +- if (key == null || key.remaining() == 0) +- { +- throw new org.apache.cassandra.exceptions.InvalidRequestException("Key may not be empty"); +- } +- +- // check that key can be handled by FBUtilities.writeShortByteArray +- if (key.remaining() > FBUtilities.MAX_UNSIGNED_SHORT) +- { +- throw new org.apache.cassandra.exceptions.InvalidRequestException("Key length of " + key.remaining() + +- " is longer than maximum of " + +- FBUtilities.MAX_UNSIGNED_SHORT); +- } +- +- try +- { +- metadata.getKeyValidator().validate(key); +- } +- catch (MarshalException e) +- { +- throw new org.apache.cassandra.exceptions.InvalidRequestException(e.getMessage()); +- } +- } +- +- public static void validateKeyspace(String keyspaceName) throws KeyspaceNotDefinedException +- { +- if (!Schema.instance.getKeyspaces().contains(keyspaceName)) +- { +- throw new KeyspaceNotDefinedException("Keyspace " + keyspaceName + " does not exist"); +- } +- } +- +- public static CFMetaData validateColumnFamily(String keyspaceName, String cfName, boolean isCommutativeOp) throws org.apache.cassandra.exceptions.InvalidRequestException +- { +- CFMetaData metadata = validateColumnFamily(keyspaceName, cfName); +- +- if (isCommutativeOp) +- { +- if (!metadata.isCounter()) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("invalid operation for non commutative table " + cfName); +- } +- else +- { +- if (metadata.isCounter()) +- throw 
new org.apache.cassandra.exceptions.InvalidRequestException("invalid operation for commutative table " + cfName); +- } +- return metadata; +- } +- +- // To be used when the operation should be authorized whether this is a counter CF or not +- public static CFMetaData validateColumnFamily(String keyspaceName, String cfName) throws org.apache.cassandra.exceptions.InvalidRequestException +- { +- validateKeyspace(keyspaceName); +- if (cfName.isEmpty()) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("non-empty table is required"); +- +- CFMetaData metadata = Schema.instance.getCFMetaData(keyspaceName, cfName); +- if (metadata == null) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("unconfigured table " + cfName); +- +- return metadata; +- } +- +- /** +- * validates all parts of the path to the column, including the column name +- */ +- public static void validateColumnPath(CFMetaData metadata, ColumnPath column_path) throws org.apache.cassandra.exceptions.InvalidRequestException +- { +- if (!metadata.isSuper()) +- { +- if (column_path.super_column != null) +- { +- throw new org.apache.cassandra.exceptions.InvalidRequestException("supercolumn parameter is invalid for standard CF " + metadata.cfName); +- } +- if (column_path.column == null) +- { +- throw new org.apache.cassandra.exceptions.InvalidRequestException("column parameter is not optional for standard CF " + metadata.cfName); +- } +- } +- else +- { +- if (column_path.super_column == null) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("supercolumn parameter is not optional for super CF " + metadata.cfName); +- } +- if (column_path.column != null) +- { +- validateColumnNames(metadata, column_path.super_column, Arrays.asList(column_path.column)); +- } +- if (column_path.super_column != null) +- { +- validateColumnNames(metadata, (ByteBuffer)null, Arrays.asList(column_path.super_column)); +- } +- } +- +- public static void 
validateColumnParent(CFMetaData metadata, ColumnParent column_parent) throws org.apache.cassandra.exceptions.InvalidRequestException +- { +- if (!metadata.isSuper()) +- { +- if (column_parent.super_column != null) +- { +- throw new org.apache.cassandra.exceptions.InvalidRequestException("table alone is required for standard CF " + metadata.cfName); +- } +- } +- +- if (column_parent.super_column != null) +- { +- validateColumnNames(metadata, (ByteBuffer)null, Arrays.asList(column_parent.super_column)); +- } +- } +- +- // column_path_or_parent is a ColumnPath for remove, where the "column" is optional even for a standard CF +- static void validateColumnPathOrParent(CFMetaData metadata, ColumnPath column_path_or_parent) throws org.apache.cassandra.exceptions.InvalidRequestException +- { +- if (metadata.isSuper()) +- { +- if (column_path_or_parent.super_column == null && column_path_or_parent.column != null) +- { +- throw new org.apache.cassandra.exceptions.InvalidRequestException("A column cannot be specified without specifying a super column for removal on super CF " +- + metadata.cfName); +- } +- } +- else +- { +- if (column_path_or_parent.super_column != null) +- { +- throw new org.apache.cassandra.exceptions.InvalidRequestException("supercolumn may not be specified for standard CF " + metadata.cfName); +- } +- } +- if (column_path_or_parent.column != null) +- { +- validateColumnNames(metadata, column_path_or_parent.super_column, Arrays.asList(column_path_or_parent.column)); +- } +- if (column_path_or_parent.super_column != null) +- { +- validateColumnNames(metadata, (ByteBuffer)null, Arrays.asList(column_path_or_parent.super_column)); +- } +- } +- +- private static AbstractType getThriftColumnNameComparator(CFMetaData metadata, ByteBuffer superColumnName) +- { +- if (!metadata.isSuper()) +- return LegacyLayout.makeLegacyComparator(metadata); +- +- if (superColumnName == null) +- { +- // comparator for super column name +- return metadata.comparator.subtype(0); +- 
} +- else +- { +- // comparator for sub columns +- return metadata.thriftColumnNameType(); +- } +- } +- +- /** +- * Validates the column names but not the parent path or data +- */ +- private static void validateColumnNames(CFMetaData metadata, ByteBuffer superColumnName, Iterable column_names) +- throws org.apache.cassandra.exceptions.InvalidRequestException +- { +- int maxNameLength = LegacyLayout.MAX_CELL_NAME_LENGTH; +- +- if (superColumnName != null) +- { +- if (superColumnName.remaining() > maxNameLength) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("supercolumn name length must not be greater than " + maxNameLength); +- if (superColumnName.remaining() == 0) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("supercolumn name must not be empty"); +- if (!metadata.isSuper()) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("supercolumn specified to table " + metadata.cfName + " containing normal columns"); +- } +- AbstractType comparator = getThriftColumnNameComparator(metadata, superColumnName); +- boolean isCQL3Table = !metadata.isThriftCompatible(); +- for (ByteBuffer name : column_names) +- { +- if (name.remaining() > maxNameLength) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("column name length must not be greater than " + maxNameLength); +- if (name.remaining() == 0) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("column name must not be empty"); +- try +- { +- comparator.validate(name); +- } +- catch (MarshalException e) +- { +- throw new org.apache.cassandra.exceptions.InvalidRequestException(e.getMessage()); +- } +- +- if (isCQL3Table) +- { +- try +- { +- LegacyLayout.LegacyCellName cname = LegacyLayout.decodeCellName(metadata, name); +- assert cname.clustering.size() == metadata.comparator.size(); +- +- // CQL3 table don't support having only part of their composite column names set +- for (int i = 0; i < cname.clustering.size(); i++) +- { +- 
if (cname.clustering.get(i) == null) +- throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("Not enough components (found %d but %d expected) for column name since %s is a CQL3 table", +- i, metadata.comparator.size() + 1, metadata.cfName)); +- } +- +- // On top of that, if we have a collection component, the (CQL3) column must be a collection +- if (cname.column != null && cname.collectionElement != null && !cname.column.type.isCollection()) +- throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("Invalid collection component, %s is not a collection", cname.column.name)); +- } +- catch (IllegalArgumentException | UnknownColumnException e) +- { +- throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("Error validating cell name for CQL3 table %s: %s", metadata.cfName, e.getMessage())); +- } +- } +- } +- } +- +- public static void validateColumnNames(CFMetaData metadata, ColumnParent column_parent, Iterable column_names) throws org.apache.cassandra.exceptions.InvalidRequestException +- { +- validateColumnNames(metadata, column_parent.super_column, column_names); +- } +- +- public static void validateRange(CFMetaData metadata, ColumnParent column_parent, SliceRange range) throws org.apache.cassandra.exceptions.InvalidRequestException +- { +- if (range.count < 0) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("get_slice requires non-negative count"); +- +- int maxNameLength = LegacyLayout.MAX_CELL_NAME_LENGTH; +- if (range.start.remaining() > maxNameLength) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("range start length cannot be larger than " + maxNameLength); +- if (range.finish.remaining() > maxNameLength) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("range finish length cannot be larger than " + maxNameLength); +- +- AbstractType comparator = getThriftColumnNameComparator(metadata, column_parent.super_column); +- 
try +- { +- comparator.validate(range.start); +- comparator.validate(range.finish); +- } +- catch (MarshalException e) +- { +- throw new org.apache.cassandra.exceptions.InvalidRequestException(e.getMessage()); +- } +- +- Comparator orderedComparator = range.isReversed() ? comparator.reverseComparator : comparator; +- if (range.start.remaining() > 0 +- && range.finish.remaining() > 0 +- && orderedComparator.compare(range.start, range.finish) > 0) +- { +- throw new org.apache.cassandra.exceptions.InvalidRequestException("range finish must come after start in the order of traversal"); +- } +- } +- +- public static void validateColumnOrSuperColumn(CFMetaData metadata, ColumnOrSuperColumn cosc) +- throws org.apache.cassandra.exceptions.InvalidRequestException +- { +- boolean isCommutative = metadata.isCounter(); +- +- int nulls = 0; +- if (cosc.column == null) nulls++; +- if (cosc.super_column == null) nulls++; +- if (cosc.counter_column == null) nulls++; +- if (cosc.counter_super_column == null) nulls++; +- +- if (nulls != 3) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("ColumnOrSuperColumn must have one (and only one) of column, super_column, counter and counter_super_column"); +- +- if (cosc.column != null) +- { +- if (isCommutative) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("invalid operation for commutative table " + metadata.cfName); +- +- validateTtl(cosc.column); +- validateColumnPath(metadata, new ColumnPath(metadata.cfName).setSuper_column((ByteBuffer)null).setColumn(cosc.column.name)); +- validateColumnData(metadata, null, cosc.column); +- } +- +- if (cosc.super_column != null) +- { +- if (isCommutative) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("invalid operation for commutative table " + metadata.cfName); +- +- for (Column c : cosc.super_column.columns) +- { +- validateColumnPath(metadata, new ColumnPath(metadata.cfName).setSuper_column(cosc.super_column.name).setColumn(c.name)); 
+- validateColumnData(metadata, cosc.super_column.name, c); +- } +- } +- +- if (cosc.counter_column != null) +- { +- if (!isCommutative) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("invalid operation for non commutative table " + metadata.cfName); +- +- validateColumnPath(metadata, new ColumnPath(metadata.cfName).setSuper_column((ByteBuffer)null).setColumn(cosc.counter_column.name)); +- } +- +- if (cosc.counter_super_column != null) +- { +- if (!isCommutative) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("invalid operation for non commutative table " + metadata.cfName); +- +- for (CounterColumn c : cosc.counter_super_column.columns) +- validateColumnPath(metadata, new ColumnPath(metadata.cfName).setSuper_column(cosc.counter_super_column.name).setColumn(c.name)); +- } +- } +- +- private static void validateTtl(Column column) throws org.apache.cassandra.exceptions.InvalidRequestException +- { +- if (column.isSetTtl()) +- { +- if (column.ttl < 0) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("ttl must be greater or equal to 0"); +- +- if (column.ttl > Attributes.MAX_TTL) +- throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("ttl is too large. requested (%d) maximum (%d)", column.ttl, Attributes.MAX_TTL)); +- } +- else +- { +- // if it's not set, then it should be zero -- here we are just checking to make sure Thrift doesn't change that contract with us. 
+- assert column.ttl == 0; +- } +- } +- +- public static void validateMutation(CFMetaData metadata, Mutation mut) +- throws org.apache.cassandra.exceptions.InvalidRequestException +- { +- ColumnOrSuperColumn cosc = mut.column_or_supercolumn; +- Deletion del = mut.deletion; +- +- int nulls = 0; +- if (cosc == null) nulls++; +- if (del == null) nulls++; +- +- if (nulls != 1) +- { +- throw new org.apache.cassandra.exceptions.InvalidRequestException("mutation must have one and only one of column_or_supercolumn or deletion"); +- } +- +- if (cosc != null) +- { +- validateColumnOrSuperColumn(metadata, cosc); +- } +- else +- { +- validateDeletion(metadata, del); +- } +- } +- +- public static void validateDeletion(CFMetaData metadata, Deletion del) throws org.apache.cassandra.exceptions.InvalidRequestException +- { +- +- if (del.super_column != null) +- validateColumnNames(metadata, (ByteBuffer)null, Arrays.asList(del.super_column)); +- +- if (del.predicate != null) +- validateSlicePredicate(metadata, del.super_column, del.predicate); +- +- if (!metadata.isSuper() && del.super_column != null) +- { +- String msg = String.format("Deletion of super columns is not possible on a standard table (KeySpace=%s Table=%s Deletion=%s)", metadata.ksName, metadata.cfName, del); +- throw new org.apache.cassandra.exceptions.InvalidRequestException(msg); +- } +- +- if (metadata.isCounter()) +- { +- // forcing server timestamp even if a timestamp was set for coherence with other counter operation +- del.timestamp = FBUtilities.timestampMicros(); +- } +- else if (!del.isSetTimestamp()) +- { +- throw new org.apache.cassandra.exceptions.InvalidRequestException("Deletion timestamp is not optional for non commutative table " + metadata.cfName); +- } +- } +- +- public static void validateSlicePredicate(CFMetaData metadata, ByteBuffer scName, SlicePredicate predicate) throws org.apache.cassandra.exceptions.InvalidRequestException +- { +- if (predicate.column_names == null && predicate.slice_range 
== null) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("A SlicePredicate must be given a list of Columns, a SliceRange, or both"); +- +- if (predicate.slice_range != null) +- validateRange(metadata, new ColumnParent(metadata.cfName).setSuper_column(scName), predicate.slice_range); +- +- if (predicate.column_names != null) +- validateColumnNames(metadata, scName, predicate.column_names); +- } +- +- /** +- * Validates the data part of the column (everything in the column object but the name, which is assumed to be valid) +- */ +- public static void validateColumnData(CFMetaData metadata, ByteBuffer scName, Column column) throws org.apache.cassandra.exceptions.InvalidRequestException +- { +- validateTtl(column); +- if (!column.isSetValue()) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("Column value is required"); +- if (!column.isSetTimestamp()) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("Column timestamp is required"); +- +- try +- { +- LegacyLayout.LegacyCellName cn = LegacyLayout.decodeCellName(metadata, scName, column.name); +- cn.column.validateCellValue(column.value); +- +- } +- catch (UnknownColumnException e) +- { +- throw new org.apache.cassandra.exceptions.InvalidRequestException(e.getMessage()); +- } +- catch (MarshalException me) +- { +- if (logger.isTraceEnabled()) +- logger.trace("rejecting invalid value {}", ByteBufferUtil.bytesToHex(summarize(column.value))); +- +- throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("(%s) [%s][%s][%s] failed validation", +- me.getMessage(), +- metadata.ksName, +- metadata.cfName, +- (getThriftColumnNameComparator(metadata, scName)).getString(column.name))); +- } +- +- } +- +- /** +- * Return, at most, the first 64K of the buffer. This avoids very large column values being +- * logged in their entirety. 
+- */ +- private static ByteBuffer summarize(ByteBuffer buffer) +- { +- int MAX = Short.MAX_VALUE; +- if (buffer.remaining() <= MAX) +- return buffer; +- return (ByteBuffer) buffer.slice().limit(buffer.position() + MAX); +- } +- +- +- public static void validatePredicate(CFMetaData metadata, ColumnParent column_parent, SlicePredicate predicate) +- throws org.apache.cassandra.exceptions.InvalidRequestException +- { +- if (predicate.column_names == null && predicate.slice_range == null) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("predicate column_names and slice_range may not both be null"); +- if (predicate.column_names != null && predicate.slice_range != null) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("predicate column_names and slice_range may not both be present"); +- +- if (predicate.getSlice_range() != null) +- validateRange(metadata, column_parent, predicate.slice_range); +- else +- validateColumnNames(metadata, column_parent, predicate.column_names); +- } +- +- public static void validateKeyRange(CFMetaData metadata, ByteBuffer superColumn, KeyRange range) throws org.apache.cassandra.exceptions.InvalidRequestException +- { +- if ((range.start_key == null) == (range.start_token == null) +- || (range.end_key == null) == (range.end_token == null)) +- { +- throw new org.apache.cassandra.exceptions.InvalidRequestException("exactly one each of {start key, start token} and {end key, end token} must be specified"); +- } +- +- // (key, token) is supported (for wide-row CFRR) but not (token, key) +- if (range.start_token != null && range.end_key != null) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("start token + end key is not a supported key range"); +- +- IPartitioner p = metadata.partitioner; +- +- if (range.start_key != null && range.end_key != null) +- { +- Token startToken = p.getToken(range.start_key); +- Token endToken = p.getToken(range.end_key); +- if (startToken.compareTo(endToken) 
> 0 && !endToken.isMinimum()) +- { +- if (p.preservesOrder()) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("start key must sort before (or equal to) finish key in your partitioner!"); +- else +- throw new org.apache.cassandra.exceptions.InvalidRequestException("start key's token sorts after end key's token. this is not allowed; you probably should not specify end key at all except with an ordered partitioner"); +- } +- } +- else if (range.start_key != null && range.end_token != null) +- { +- // start_token/end_token can wrap, but key/token should not +- PartitionPosition stop = p.getTokenFactory().fromString(range.end_token).maxKeyBound(); +- if (PartitionPosition.ForKey.get(range.start_key, p).compareTo(stop) > 0 && !stop.isMinimum()) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("Start key's token sorts after end token"); +- } +- +- validateFilterClauses(metadata, range.row_filter); +- +- if (!isEmpty(range.row_filter) && superColumn != null) +- { +- throw new org.apache.cassandra.exceptions.InvalidRequestException("super columns are not supported for indexing"); +- } +- +- if (range.count <= 0) +- { +- throw new org.apache.cassandra.exceptions.InvalidRequestException("maxRows must be positive"); +- } +- } +- +- private static boolean isEmpty(List clause) +- { +- return clause == null || clause.isEmpty(); +- } +- +- public static void validateIndexClauses(CFMetaData metadata, IndexClause index_clause) +- throws org.apache.cassandra.exceptions.InvalidRequestException +- { +- if (index_clause.expressions.isEmpty()) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("index clause list may not be empty"); +- +- if (!validateFilterClauses(metadata, index_clause.expressions)) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("No indexed columns present in index clause with operator EQ"); +- } +- +- // return true if index_clause contains an indexed columns with operator EQ +- public 
static boolean validateFilterClauses(CFMetaData metadata, List index_clause) +- throws org.apache.cassandra.exceptions.InvalidRequestException +- { +- if (isEmpty(index_clause)) +- // no filter to apply +- return false; +- +- SecondaryIndexManager idxManager = Keyspace.open(metadata.ksName).getColumnFamilyStore(metadata.cfName).indexManager; +- AbstractType nameValidator = getThriftColumnNameComparator(metadata, null); +- +- boolean isIndexed = false; +- for (IndexExpression expression : index_clause) +- { +- try +- { +- nameValidator.validate(expression.column_name); +- } +- catch (MarshalException me) +- { +- throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("[%s]=[%s] failed name validation (%s)", +- ByteBufferUtil.bytesToHex(expression.column_name), +- ByteBufferUtil.bytesToHex(expression.value), +- me.getMessage())); +- } +- +- if (expression.value.remaining() > 0xFFFF) +- throw new org.apache.cassandra.exceptions.InvalidRequestException("Index expression values may not be larger than 64K"); +- +- ColumnDefinition def = metadata.getColumnDefinition(expression.column_name); +- if (def == null) +- { +- if (!metadata.isCompactTable()) +- throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("Unknown column %s", nameValidator.getString(expression.column_name))); +- +- def = metadata.compactValueColumn(); +- } +- +- try +- { +- def.type.validate(expression.value); +- } +- catch (MarshalException me) +- { +- throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("[%s]=[%s] failed value validation (%s)", +- ByteBufferUtil.bytesToHex(expression.column_name), +- ByteBufferUtil.bytesToHex(expression.value), +- me.getMessage())); +- } +- +- for(Index index : idxManager.listIndexes()) +- isIndexed |= index.supportsExpression(def, Operator.valueOf(expression.op.name())); +- } +- +- return isIndexed; +- } +- +- public static void validateKeyspaceNotYetExisting(String newKsName) throws 
org.apache.cassandra.exceptions.InvalidRequestException +- { +- // keyspace names must be unique case-insensitively because the keyspace name becomes the directory +- // where we store CF sstables. Names that differ only in case would thus cause problems on +- // case-insensitive filesystems (NTFS, most installations of HFS+). +- for (String ksName : Schema.instance.getKeyspaces()) +- { +- if (ksName.equalsIgnoreCase(newKsName)) +- throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("Keyspace names must be case-insensitively unique (\"%s\" conflicts with \"%s\")", +- newKsName, +- ksName)); +- } +- } +- +- public static void validateKeyspaceNotSystem(String modifiedKeyspace) throws org.apache.cassandra.exceptions.InvalidRequestException +- { +- if (Schema.isSystemKeyspace(modifiedKeyspace)) +- throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("%s keyspace is not user-modifiable", modifiedKeyspace)); +- } +- +- //public static IDiskAtomFilter asIFilter(SlicePredicate sp, CFMetaData metadata, ByteBuffer superColumn) +- //{ +- // SliceRange sr = sp.slice_range; +- // IDiskAtomFilter filter; +- +- // CellNameType comparator = metadata.isSuper() +- // ? new SimpleDenseCellNameType(metadata.comparator.subtype(superColumn == null ? 
0 : 1)) +- // : metadata.comparator; +- // if (sr == null) +- // { +- +- // SortedSet ss = new TreeSet(comparator); +- // for (ByteBuffer bb : sp.column_names) +- // ss.add(comparator.cellFromByteBuffer(bb)); +- // filter = new NamesQueryFilter(ss); +- // } +- // else +- // { +- // filter = new SliceQueryFilter(comparator.fromByteBuffer(sr.start), +- // comparator.fromByteBuffer(sr.finish), +- // sr.reversed, +- // sr.count); +- // } +- +- // if (metadata.isSuper()) +- // filter = SuperColumns.fromSCFilter(metadata.comparator, superColumn, filter); +- // return filter; +- //} +-} +diff --git a/src/java/org/apache/cassandra/tools/NodeProbe.java b/src/java/org/apache/cassandra/tools/NodeProbe.java +index 3bf99ef..53b0dee 100644 +--- a/src/java/org/apache/cassandra/tools/NodeProbe.java ++++ b/src/java/org/apache/cassandra/tools/NodeProbe.java +@@ -937,21 +937,6 @@ public class NodeProbe implements AutoCloseable + return ssProxy.isGossipRunning(); + } + +- public void stopThriftServer() +- { +- ssProxy.stopRPCServer(); +- } +- +- public void startThriftServer() +- { +- ssProxy.startRPCServer(); +- } +- +- public boolean isThriftServerRunning() +- { +- return ssProxy.isRPCServerRunning(); +- } +- + public void stopCassandraDaemon() + { + ssProxy.stopDaemon(); +diff --git a/src/java/org/apache/cassandra/tools/NodeTool.java b/src/java/org/apache/cassandra/tools/NodeTool.java +index 8640b58..c264c31 100644 +--- a/src/java/org/apache/cassandra/tools/NodeTool.java ++++ b/src/java/org/apache/cassandra/tools/NodeTool.java +@@ -75,7 +75,6 @@ public class NodeTool + EnableGossip.class, + DisableGossip.class, + EnableHandoff.class, +- EnableThrift.class, + GcStats.class, + GetCompactionThreshold.class, + GetCompactionThroughput.class, +@@ -113,7 +112,6 @@ public class NodeTool + Status.class, + StatusBinary.class, + StatusGossip.class, +- StatusThrift.class, + StatusBackup.class, + StatusHandoff.class, + Stop.class, +@@ -127,7 +125,6 @@ public class NodeTool + 
ResetLocalSchema.class, + ReloadTriggers.class, + SetCacheKeysToSave.class, +- DisableThrift.class, + DisableHandoff.class, + Drain.class, + TruncateHints.class, +diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableThrift.java b/src/java/org/apache/cassandra/tools/nodetool/DisableThrift.java +deleted file mode 100644 +index 148b195..0000000 +--- a/src/java/org/apache/cassandra/tools/nodetool/DisableThrift.java ++++ /dev/null +@@ -1,33 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. 
+- */ +-package org.apache.cassandra.tools.nodetool; +- +-import io.airlift.command.Command; +- +-import org.apache.cassandra.tools.NodeProbe; +-import org.apache.cassandra.tools.NodeTool.NodeToolCmd; +- +-@Command(name = "disablethrift", description = "Disable thrift server") +-public class DisableThrift extends NodeToolCmd +-{ +- @Override +- public void execute(NodeProbe probe) +- { +- probe.stopThriftServer(); +- } +-} +\ No newline at end of file +diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableThrift.java b/src/java/org/apache/cassandra/tools/nodetool/EnableThrift.java +deleted file mode 100644 +index 780b36d..0000000 +--- a/src/java/org/apache/cassandra/tools/nodetool/EnableThrift.java ++++ /dev/null +@@ -1,33 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. 
+- */ +-package org.apache.cassandra.tools.nodetool; +- +-import io.airlift.command.Command; +- +-import org.apache.cassandra.tools.NodeProbe; +-import org.apache.cassandra.tools.NodeTool.NodeToolCmd; +- +-@Command(name = "enablethrift", description = "Reenable thrift server") +-public class EnableThrift extends NodeToolCmd +-{ +- @Override +- public void execute(NodeProbe probe) +- { +- probe.startThriftServer(); +- } +-} +\ No newline at end of file +diff --git a/src/java/org/apache/cassandra/tools/nodetool/Info.java b/src/java/org/apache/cassandra/tools/nodetool/Info.java +index 032e47f..bddd124 100644 +--- a/src/java/org/apache/cassandra/tools/nodetool/Info.java ++++ b/src/java/org/apache/cassandra/tools/nodetool/Info.java +@@ -47,7 +47,6 @@ public class Info extends NodeToolCmd + + System.out.printf("%-23s: %s%n", "ID", probe.getLocalHostId()); + System.out.printf("%-23s: %s%n", "Gossip active", gossipInitialized); +- System.out.printf("%-23s: %s%n", "Thrift active", probe.isThriftServerRunning()); + System.out.printf("%-23s: %s%n", "Native Transport active", probe.isNativeTransportRunning()); + System.out.printf("%-23s: %s%n", "Load", probe.getLoadString()); + if (gossipInitialized) +diff --git a/src/java/org/apache/cassandra/tools/nodetool/StatusThrift.java b/src/java/org/apache/cassandra/tools/nodetool/StatusThrift.java +deleted file mode 100644 +index 0cb17d2..0000000 +--- a/src/java/org/apache/cassandra/tools/nodetool/StatusThrift.java ++++ /dev/null +@@ -1,36 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. 
You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. +- */ +-package org.apache.cassandra.tools.nodetool; +- +-import io.airlift.command.Command; +- +-import org.apache.cassandra.tools.NodeProbe; +-import org.apache.cassandra.tools.NodeTool.NodeToolCmd; +- +-@Command(name = "statusthrift", description = "Status of thrift server") +-public class StatusThrift extends NodeToolCmd +-{ +- @Override +- public void execute(NodeProbe probe) +- { +- System.out.println( +- probe.isThriftServerRunning() +- ? "running" +- : "not running"); +- } +-} +\ No newline at end of file +diff --git a/src/java/org/apache/cassandra/transport/messages/ResultMessage.java b/src/java/org/apache/cassandra/transport/messages/ResultMessage.java +index b76243f..ef6df22 100644 +--- a/src/java/org/apache/cassandra/transport/messages/ResultMessage.java ++++ b/src/java/org/apache/cassandra/transport/messages/ResultMessage.java +@@ -27,9 +27,6 @@ import org.apache.cassandra.cql3.ResultSet; + import org.apache.cassandra.cql3.statements.SelectStatement; + import org.apache.cassandra.cql3.statements.ParsedStatement; + import org.apache.cassandra.transport.*; +-import org.apache.cassandra.thrift.CqlPreparedResult; +-import org.apache.cassandra.thrift.CqlResult; +-import org.apache.cassandra.thrift.CqlResultType; + import org.apache.cassandra.utils.MD5Digest; + + public abstract class ResultMessage extends Message.Response +@@ -103,8 +100,6 @@ public abstract class ResultMessage extends Message.Response + this.kind = kind; + } + +- public abstract CqlResult toThriftResult(); +- + public static class Void extends ResultMessage + { + // 
Even though we have no specific information here, don't make a +@@ -132,11 +127,6 @@ public abstract class ResultMessage extends Message.Response + } + }; + +- public CqlResult toThriftResult() +- { +- return new CqlResult(CqlResultType.VOID); +- } +- + @Override + public String toString() + { +@@ -175,11 +165,6 @@ public abstract class ResultMessage extends Message.Response + } + }; + +- public CqlResult toThriftResult() +- { +- return new CqlResult(CqlResultType.VOID); +- } +- + @Override + public String toString() + { +@@ -219,11 +204,6 @@ public abstract class ResultMessage extends Message.Response + this.result = result; + } + +- public CqlResult toThriftResult() +- { +- return result.toThriftResult(); +- } +- + @Override + public String toString() + { +@@ -244,7 +224,7 @@ public abstract class ResultMessage extends Message.Response + if (version > 1) + resultMetadata = ResultSet.ResultMetadata.codec.decode(body, version); + +- return new Prepared(id, -1, metadata, resultMetadata); ++ return new Prepared(id, metadata, resultMetadata); + } + + public void encode(ResultMessage msg, ByteBuf dest, int version) +@@ -282,24 +262,15 @@ public abstract class ResultMessage extends Message.Response + /** Describes the results of executing this prepared statement */ + public final ResultSet.ResultMetadata resultMetadata; + +- // statement id for CQL-over-thrift compatibility. The binary protocol ignore that. 
+- private final int thriftStatementId; +- + public Prepared(MD5Digest statementId, ParsedStatement.Prepared prepared) + { +- this(statementId, -1, new ResultSet.PreparedMetadata(prepared.boundNames, prepared.partitionKeyBindIndexes), extractResultMetadata(prepared.statement)); ++ this(statementId, new ResultSet.PreparedMetadata(prepared.boundNames, prepared.partitionKeyBindIndexes), extractResultMetadata(prepared.statement)); + } + +- public static Prepared forThrift(int statementId, List names) +- { +- return new Prepared(null, statementId, new ResultSet.PreparedMetadata(names, null), ResultSet.ResultMetadata.EMPTY); +- } +- +- private Prepared(MD5Digest statementId, int thriftStatementId, ResultSet.PreparedMetadata metadata, ResultSet.ResultMetadata resultMetadata) ++ private Prepared(MD5Digest statementId, ResultSet.PreparedMetadata metadata, ResultSet.ResultMetadata resultMetadata) + { + super(Kind.PREPARED); + this.statementId = statementId; +- this.thriftStatementId = thriftStatementId; + this.metadata = metadata; + this.resultMetadata = resultMetadata; + } +@@ -312,23 +283,6 @@ public abstract class ResultMessage extends Message.Response + return ((SelectStatement)statement).getResultMetadata(); + } + +- public CqlResult toThriftResult() +- { +- throw new UnsupportedOperationException(); +- } +- +- public CqlPreparedResult toThriftPreparedResult() +- { +- List namesString = new ArrayList(metadata.names.size()); +- List typesString = new ArrayList(metadata.names.size()); +- for (ColumnSpecification name : metadata.names) +- { +- namesString.add(name.toString()); +- typesString.add(name.type.toString()); +- } +- return new CqlPreparedResult(thriftStatementId, metadata.names.size()).setVariable_types(typesString).setVariable_names(namesString); +- } +- + @Override + public String toString() + { +@@ -368,11 +322,6 @@ public abstract class ResultMessage extends Message.Response + } + }; + +- public CqlResult toThriftResult() +- { +- return new 
CqlResult(CqlResultType.VOID); +- } +- + @Override + public String toString() + { +diff --git a/src/java/org/apache/cassandra/utils/BatchRemoveIterator.java b/src/java/org/apache/cassandra/utils/BatchRemoveIterator.java +deleted file mode 100644 +index 4377426..0000000 +--- a/src/java/org/apache/cassandra/utils/BatchRemoveIterator.java ++++ /dev/null +@@ -1,32 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. +- */ +-package org.apache.cassandra.utils; +- +-import java.util.Iterator; +- +-/** +- * Iterator that allows us to more efficiently remove many items +- */ +-public interface BatchRemoveIterator extends Iterator +-{ +- /** +- * Commits the remove operations in this batch iterator. After this no more +- * deletes can be made. Any further calls to remove() or commit() will throw IllegalStateException. 
+- */ +- void commit(); +-} +diff --git a/src/java/org/apache/cassandra/utils/ByteBufferUtil.java b/src/java/org/apache/cassandra/utils/ByteBufferUtil.java +index cb4fc1d..47c901b 100644 +--- a/src/java/org/apache/cassandra/utils/ByteBufferUtil.java ++++ b/src/java/org/apache/cassandra/utils/ByteBufferUtil.java +@@ -411,6 +411,15 @@ public class ByteBufferUtil + return bytes; + } + ++ public static byte[] readBytesWithLength(DataInput in) throws IOException ++ { ++ int length = in.readInt(); ++ if (length < 0) ++ throw new IOException("Corrupt (negative) value length encountered"); ++ ++ return readBytes(in, length); ++ } ++ + /** + * Convert a byte buffer to an integer. + * Does not change the byte buffer position. +diff --git a/test/conf/cassandra-murmur.yaml b/test/conf/cassandra-murmur.yaml +index a4b25ba..a8288d0 100644 +--- a/test/conf/cassandra-murmur.yaml ++++ b/test/conf/cassandra-murmur.yaml +@@ -14,7 +14,6 @@ hints_directory: build/test/cassandra/hints + partitioner: org.apache.cassandra.dht.Murmur3Partitioner + listen_address: 127.0.0.1 + storage_port: 7010 +-rpc_port: 9170 + start_native_transport: true + native_transport_port: 9042 + column_index_size_in_kb: 4 +@@ -28,8 +27,6 @@ seed_provider: + - seeds: "127.0.0.1" + endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch + dynamic_snitch: true +-request_scheduler: org.apache.cassandra.scheduler.RoundRobinScheduler +-request_scheduler_id: keyspace + server_encryption_options: + internode_encryption: none + keystore: conf/.keystore +diff --git a/test/conf/cassandra.yaml b/test/conf/cassandra.yaml +index cf02634..6a5d6e0 100644 +--- a/test/conf/cassandra.yaml ++++ b/test/conf/cassandra.yaml +@@ -15,7 +15,6 @@ hints_directory: build/test/cassandra/hints + partitioner: org.apache.cassandra.dht.ByteOrderedPartitioner + listen_address: 127.0.0.1 + storage_port: 7010 +-rpc_port: 9170 + start_native_transport: true + native_transport_port: 9042 + column_index_size_in_kb: 4 +@@ -29,8 +28,6 @@ 
seed_provider: + - seeds: "127.0.0.1" + endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch + dynamic_snitch: true +-request_scheduler: org.apache.cassandra.scheduler.RoundRobinScheduler +-request_scheduler_id: keyspace + server_encryption_options: + internode_encryption: none + keystore: conf/.keystore +diff --git a/test/resources/functions/configure_cassandra.sh b/test/resources/functions/configure_cassandra.sh +index 3464653..38ff098 100644 +--- a/test/resources/functions/configure_cassandra.sh ++++ b/test/resources/functions/configure_cassandra.sh +@@ -45,40 +45,25 @@ function configure_cassandra() { + ;; + esac + +- OH_SIX_CONFIG="/etc/cassandra/conf/storage-conf.xml" +- +- if [[ -e "$OH_SIX_CONFIG" ]] ; then +- config_file=$OH_SIX_CONFIG +- seeds="" ++ config_file="/etc/cassandra/conf/cassandra.yaml" ++ if [[ "x"`grep -e '^seeds:' $config_file` == "x" ]]; then ++ seeds="$1" # 08 format seeds ++ shift + for server in "$@"; do +- seeds="${seeds}${server}" ++ seeds="${seeds},${server}" + done +- +- #TODO set replication +- sed -i -e "s|127.0.0.1|$seeds|" $config_file +- sed -i -e "s|localhost|$PRIVATE_SELF_HOST|" $config_file +- sed -i -e "s|localhost|$PUBLIC_SELF_HOST|" $config_file ++ sed -i -e "s|- seeds: \"127.0.0.1\"|- seeds: \"${seeds}\"|" $config_file + else +- config_file="/etc/cassandra/conf/cassandra.yaml" +- if [[ "x"`grep -e '^seeds:' $config_file` == "x" ]]; then +- seeds="$1" # 08 format seeds +- shift +- for server in "$@"; do +- seeds="${seeds},${server}" +- done +- sed -i -e "s|- seeds: \"127.0.0.1\"|- seeds: \"${seeds}\"|" $config_file +- else +- seeds="" # 07 format seeds +- for server in "$@"; do +- seeds="${seeds}\n - ${server}" +- done +- sed -i -e "/^seeds:/,/^/d" $config_file ; echo -e "seeds:${seeds}" >> $config_file +- fi +- +- sed -i -e "s|listen_address: localhost|listen_address: $PRIVATE_SELF_HOST|" $config_file +- sed -i -e "s|rpc_address: localhost|rpc_address: $PUBLIC_SELF_HOST|" $config_file ++ seeds="" # 07 format seeds 
++ for server in "$@"; do ++ seeds="${seeds}\n - ${server}" ++ done ++ sed -i -e "/^seeds:/,/^/d" $config_file ; echo -e "seeds:${seeds}" >> $config_file + fi + ++ sed -i -e "s|listen_address: localhost|listen_address: $PRIVATE_SELF_HOST|" $config_file ++ sed -i -e "s|rpc_address: localhost|rpc_address: $PUBLIC_SELF_HOST|" $config_file ++ + # Now that it's configured, start Cassandra + nohup /etc/rc.local & + +diff --git a/test/unit/org/apache/cassandra/OffsetAwareConfigurationLoader.java b/test/unit/org/apache/cassandra/OffsetAwareConfigurationLoader.java +index 0047f48..0e4a3cf 100644 +--- a/test/unit/org/apache/cassandra/OffsetAwareConfigurationLoader.java ++++ b/test/unit/org/apache/cassandra/OffsetAwareConfigurationLoader.java +@@ -49,7 +49,6 @@ public class OffsetAwareConfigurationLoader extends YamlConfigurationLoader + + String sep = File.pathSeparator; + +- config.rpc_port += offset; + config.native_transport_port += offset; + config.storage_port += offset; + +diff --git a/test/unit/org/apache/cassandra/Util.java b/test/unit/org/apache/cassandra/Util.java +index 7bcee7a..328c2d8 100644 +--- a/test/unit/org/apache/cassandra/Util.java ++++ b/test/unit/org/apache/cassandra/Util.java +@@ -668,7 +668,6 @@ public class Util + { + super(original.isDigestQuery(), + original.digestVersion(), +- original.isForThrift(), + original.metadata(), + original.nowInSec(), + original.columnFilter(), +diff --git a/test/unit/org/apache/cassandra/client/TestRingCache.java b/test/unit/org/apache/cassandra/client/TestRingCache.java +deleted file mode 100644 +index 51bf566..0000000 +--- a/test/unit/org/apache/cassandra/client/TestRingCache.java ++++ /dev/null +@@ -1,121 +0,0 @@ +-/** +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. 
The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. +- */ +-package org.apache.cassandra.client; +- +-import java.net.InetAddress; +-import java.nio.ByteBuffer; +-import java.util.Collection; +- +-import org.apache.cassandra.config.DatabaseDescriptor; +-import org.apache.cassandra.hadoop.ConfigHelper; +-import org.apache.cassandra.thrift.Cassandra; +-import org.apache.cassandra.thrift.Column; +-import org.apache.cassandra.thrift.ColumnParent; +-import org.apache.cassandra.thrift.ColumnPath; +-import org.apache.cassandra.thrift.ConsistencyLevel; +-import org.apache.cassandra.utils.ByteBufferUtil; +-import org.apache.commons.lang3.StringUtils; +-import org.apache.hadoop.conf.Configuration; +-import org.apache.thrift.protocol.TBinaryProtocol; +-import org.apache.thrift.transport.TFramedTransport; +-import org.apache.thrift.transport.TSocket; +- +- +-/** +- * Sample code that uses RingCache in the client. 
+- */ +-public class TestRingCache +-{ +- private RingCache ringCache; +- private Cassandra.Client thriftClient; +- private Configuration conf; +- +- public TestRingCache(String keyspace) +- { +- ConfigHelper.setOutputColumnFamily(conf, keyspace, "Standard1"); +- ringCache = new RingCache(conf); +- } +- +- private void setup(String server, int port) throws Exception +- { +- /* Establish a thrift connection to the cassandra instance */ +- TSocket socket = new TSocket(server, port); +- System.out.println(" connected to " + server + ":" + port + "."); +- TBinaryProtocol binaryProtocol = new TBinaryProtocol(new TFramedTransport(socket)); +- Cassandra.Client cassandraClient = new Cassandra.Client(binaryProtocol); +- socket.open(); +- thriftClient = cassandraClient; +- String seed = DatabaseDescriptor.getSeeds().iterator().next().getHostAddress(); +- conf = new Configuration(); +- ConfigHelper.setOutputPartitioner(conf, DatabaseDescriptor.getPartitioner().getClass().getName()); +- ConfigHelper.setOutputInitialAddress(conf, seed); +- ConfigHelper.setOutputRpcPort(conf, Integer.toString(DatabaseDescriptor.getRpcPort())); +- +- } +- +- /** +- * usage: java -cp org.apache.cassandra.client.TestRingCache [keyspace row-id-prefix row-id-int] +- * to test a single keyspace/row, use the parameters. row-id-prefix and row-id-int are appended together to form a +- * single row id. If you supply now parameters, 'Keyspace1' is assumed and will check 9 rows ('row1' through 'row9'). 
+- * @param args +- * @throws Exception +- */ +- public static void main(String[] args) throws Throwable +- { +- int minRow; +- int maxRow; +- String rowPrefix, keyspace = "Keyspace1"; +- +- if (args.length > 0) +- { +- keyspace = args[0]; +- rowPrefix = args[1]; +- minRow = Integer.parseInt(args[2]); +- maxRow = minRow + 1; +- } +- else +- { +- minRow = 1; +- maxRow = 10; +- rowPrefix = "row"; +- } +- +- TestRingCache tester = new TestRingCache(keyspace); +- +- for (int nRows = minRow; nRows < maxRow; nRows++) +- { +- ByteBuffer row = ByteBufferUtil.bytes((rowPrefix + nRows)); +- ColumnPath col = new ColumnPath("Standard1").setSuper_column((ByteBuffer)null).setColumn("col1".getBytes()); +- ColumnParent parent = new ColumnParent("Standard1").setSuper_column((ByteBuffer)null); +- +- Collection endpoints = tester.ringCache.getEndpoint(row); +- InetAddress firstEndpoint = endpoints.iterator().next(); +- System.out.printf("hosts with key %s : %s; choose %s%n", +- new String(row.array()), StringUtils.join(endpoints, ","), firstEndpoint); +- +- // now, read the row back directly from the host owning the row locally +- tester.setup(firstEndpoint.getHostAddress(), DatabaseDescriptor.getRpcPort()); +- tester.thriftClient.set_keyspace(keyspace); +- tester.thriftClient.insert(row, parent, new Column(ByteBufferUtil.bytes("col1")).setValue(ByteBufferUtil.bytes("val1")).setTimestamp(1), ConsistencyLevel.ONE); +- Column column = tester.thriftClient.get(row, col, ConsistencyLevel.ONE).column; +- System.out.println("read row " + new String(row.array()) + " " + new String(column.name.array()) + ":" + new String(column.value.array()) + ":" + column.timestamp); +- } +- +- System.exit(1); +- } +-} +diff --git a/test/unit/org/apache/cassandra/config/CFMetaDataTest.java b/test/unit/org/apache/cassandra/config/CFMetaDataTest.java +index 8616987..654b516 100644 +--- a/test/unit/org/apache/cassandra/config/CFMetaDataTest.java ++++ b/test/unit/org/apache/cassandra/config/CFMetaDataTest.java 
+@@ -31,10 +31,6 @@ import org.apache.cassandra.db.partitions.PartitionUpdate; + import org.apache.cassandra.db.rows.UnfilteredRowIterators; + import org.apache.cassandra.exceptions.ConfigurationException; + import org.apache.cassandra.schema.*; +-import org.apache.cassandra.thrift.CfDef; +-import org.apache.cassandra.thrift.ColumnDef; +-import org.apache.cassandra.thrift.IndexType; +-import org.apache.cassandra.thrift.ThriftConversion; + import org.apache.cassandra.utils.ByteBufferUtil; + import org.apache.cassandra.utils.FBUtilities; + +@@ -50,27 +46,6 @@ public class CFMetaDataTest + private static final String KEYSPACE1 = "CFMetaDataTest1"; + private static final String CF_STANDARD1 = "Standard1"; + +- private static List columnDefs = new ArrayList(); +- +- static +- { +- columnDefs.add(new ColumnDef(ByteBufferUtil.bytes("col1"), AsciiType.class.getCanonicalName()) +- .setIndex_name("col1Index") +- .setIndex_type(IndexType.KEYS)); +- +- columnDefs.add(new ColumnDef(ByteBufferUtil.bytes("col2"), UTF8Type.class.getCanonicalName()) +- .setIndex_name("col2Index") +- .setIndex_type(IndexType.KEYS)); +- +- Map customIndexOptions = new HashMap<>(); +- customIndexOptions.put("option1", "value1"); +- customIndexOptions.put("option2", "value2"); +- columnDefs.add(new ColumnDef(ByteBufferUtil.bytes("col3"), Int32Type.class.getCanonicalName()) +- .setIndex_name("col3Index") +- .setIndex_type(IndexType.CUSTOM) +- .setIndex_options(customIndexOptions)); +- } +- + @BeforeClass + public static void defineSchema() throws ConfigurationException + { +@@ -81,45 +56,6 @@ public class CFMetaDataTest + } + + @Test +- public void testThriftConversion() throws Exception +- { +- CfDef cfDef = new CfDef().setDefault_validation_class(AsciiType.class.getCanonicalName()) +- .setComment("Test comment") +- .setColumn_metadata(columnDefs) +- .setKeyspace(KEYSPACE1) +- .setName(CF_STANDARD1); +- +- // convert Thrift to CFMetaData +- CFMetaData cfMetaData = ThriftConversion.fromThrift(cfDef); +- 
+- CfDef thriftCfDef = new CfDef(); +- thriftCfDef.keyspace = KEYSPACE1; +- thriftCfDef.name = CF_STANDARD1; +- thriftCfDef.default_validation_class = cfDef.default_validation_class; +- thriftCfDef.comment = cfDef.comment; +- thriftCfDef.column_metadata = new ArrayList<>(); +- for (ColumnDef columnDef : columnDefs) +- { +- ColumnDef c = new ColumnDef(); +- c.name = ByteBufferUtil.clone(columnDef.name); +- c.validation_class = columnDef.getValidation_class(); +- c.index_name = columnDef.getIndex_name(); +- c.index_type = columnDef.getIndex_type(); +- if (columnDef.isSetIndex_options()) +- c.setIndex_options(columnDef.getIndex_options()); +- thriftCfDef.column_metadata.add(c); +- } +- +- CfDef converted = ThriftConversion.toThrift(cfMetaData); +- +- assertEquals(thriftCfDef.keyspace, converted.keyspace); +- assertEquals(thriftCfDef.name, converted.name); +- assertEquals(thriftCfDef.default_validation_class, converted.default_validation_class); +- assertEquals(thriftCfDef.comment, converted.comment); +- assertEquals(new HashSet<>(thriftCfDef.column_metadata), new HashSet<>(converted.column_metadata)); +- } +- +- @Test + public void testConversionsInverses() throws Exception + { + for (String keyspaceName : Schema.instance.getNonSystemKeyspaces()) +@@ -127,9 +63,6 @@ public class CFMetaDataTest + for (ColumnFamilyStore cfs : Keyspace.open(keyspaceName).getColumnFamilyStores()) + { + CFMetaData cfm = cfs.metadata; +- if (!cfm.isThriftCompatible()) +- continue; +- + checkInverses(cfm); + + // Testing with compression to catch #3558 +@@ -144,11 +77,6 @@ public class CFMetaDataTest + { + KeyspaceMetadata keyspace = Schema.instance.getKSMetaData(cfm.ksName); + +- // Test thrift conversion +- CFMetaData before = cfm; +- CFMetaData after = ThriftConversion.fromThriftForUpdate(ThriftConversion.toThrift(before), before); +- assert before.equals(after) : String.format("%n%s%n!=%n%s", before, after); +- + // Test schema conversion + Mutation rm = 
SchemaKeyspace.makeCreateTableMutation(keyspace, cfm, FBUtilities.timestampMicros()).build(); + PartitionUpdate cfU = rm.getPartitionUpdate(Schema.instance.getId(SchemaKeyspace.NAME, SchemaKeyspace.TABLES)); +diff --git a/test/unit/org/apache/cassandra/config/ColumnDefinitionTest.java b/test/unit/org/apache/cassandra/config/ColumnDefinitionTest.java +deleted file mode 100644 +index 933d231..0000000 +--- a/test/unit/org/apache/cassandra/config/ColumnDefinitionTest.java ++++ /dev/null +@@ -1,55 +0,0 @@ +-package org.apache.cassandra.config; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. 
+- * +- */ +- +-import org.junit.Assert; +-import org.junit.Test; +- +-import org.apache.cassandra.db.marshal.*; +-import org.apache.cassandra.thrift.ThriftConversion; +-import org.apache.cassandra.utils.ByteBufferUtil; +- +-public class ColumnDefinitionTest +-{ +- @Test +- public void testSerializeDeserialize() throws Exception +- { +- CFMetaData cfm = CFMetaData.Builder.create("ks", "cf", true, false, false) +- .addPartitionKey("pkey", AsciiType.instance) +- .addClusteringColumn("name", AsciiType.instance) +- .addRegularColumn("val", AsciiType.instance) +- .build(); +- +- ColumnDefinition cd0 = ColumnDefinition.staticDef(cfm, ByteBufferUtil.bytes("TestColumnDefinitionName0"), BytesType.instance); +- ColumnDefinition cd1 = ColumnDefinition.staticDef(cfm, ByteBufferUtil.bytes("TestColumnDefinition1"), LongType.instance); +- +- testSerializeDeserialize(cfm, cd0); +- testSerializeDeserialize(cfm, cd1); +- } +- +- protected void testSerializeDeserialize(CFMetaData cfm, ColumnDefinition cd) throws Exception +- { +- ColumnDefinition newCd = ThriftConversion.fromThrift(cfm.ksName, cfm.cfName, cfm.comparator.subtype(0), null, ThriftConversion.toThrift(cfm, cd)); +- Assert.assertNotSame(cd, newCd); +- Assert.assertEquals(cd.hashCode(), newCd.hashCode()); +- Assert.assertEquals(cd, newCd); +- } +-} +diff --git a/test/unit/org/apache/cassandra/config/DatabaseDescriptorTest.java b/test/unit/org/apache/cassandra/config/DatabaseDescriptorTest.java +index 84f0235..6769eb7 100644 +--- a/test/unit/org/apache/cassandra/config/DatabaseDescriptorTest.java ++++ b/test/unit/org/apache/cassandra/config/DatabaseDescriptorTest.java +@@ -39,7 +39,6 @@ import org.apache.cassandra.gms.Gossiper; + import org.apache.cassandra.schema.KeyspaceMetadata; + import org.apache.cassandra.schema.KeyspaceParams; + import org.apache.cassandra.service.MigrationManager; +-import org.apache.cassandra.thrift.ThriftConversion; + + import static org.junit.Assert.assertEquals; + import static 
org.junit.Assert.assertNotNull; +@@ -50,34 +49,6 @@ import static org.junit.Assert.assertTrue; + @RunWith(OrderedJUnit4ClassRunner.class) + public class DatabaseDescriptorTest + { +- @Test +- public void testCFMetaDataSerialization() throws ConfigurationException, InvalidRequestException +- { +- // test serialization of all defined test CFs. +- for (String keyspaceName : Schema.instance.getNonSystemKeyspaces()) +- { +- for (CFMetaData cfm : Schema.instance.getTablesAndViews(keyspaceName)) +- { +- CFMetaData cfmDupe = ThriftConversion.fromThrift(ThriftConversion.toThrift(cfm)); +- assertNotNull(cfmDupe); +- assertEquals(cfm, cfmDupe); +- } +- } +- } +- +- @Test +- public void testKSMetaDataSerialization() throws ConfigurationException +- { +- for (String ks : Schema.instance.getNonSystemKeyspaces()) +- { +- // Not testing round-trip on the KsDef via serDe() because maps +- KeyspaceMetadata ksm = Schema.instance.getKSMetaData(ks); +- KeyspaceMetadata ksmDupe = ThriftConversion.fromThrift(ThriftConversion.toThrift(ksm)); +- assertNotNull(ksmDupe); +- assertEquals(ksm, ksmDupe); +- } +- } +- + // this came as a result of CASSANDRA-995 + @Test + public void testTransKsMigration() throws ConfigurationException, IOException +diff --git a/test/unit/org/apache/cassandra/cql3/CQLTester.java b/test/unit/org/apache/cassandra/cql3/CQLTester.java +index d0eeb4c..1be7e76 100644 +--- a/test/unit/org/apache/cassandra/cql3/CQLTester.java ++++ b/test/unit/org/apache/cassandra/cql3/CQLTester.java +@@ -725,7 +725,7 @@ public abstract class CQLTester + + protected ResultMessage.Prepared prepare(String query) throws Throwable + { +- return QueryProcessor.prepare(formatQuery(query), ClientState.forInternalCalls(), false); ++ return QueryProcessor.prepare(formatQuery(query), ClientState.forInternalCalls()); + } + + protected UntypedResultSet execute(String query, Object... 
values) throws Throwable +diff --git a/test/unit/org/apache/cassandra/cql3/ThriftCompatibilityTest.java b/test/unit/org/apache/cassandra/cql3/ThriftCompatibilityTest.java +deleted file mode 100644 +index ff2af56..0000000 +--- a/test/unit/org/apache/cassandra/cql3/ThriftCompatibilityTest.java ++++ /dev/null +@@ -1,112 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. 
+- */ +-package org.apache.cassandra.cql3; +- +-import java.util.Arrays; +-import java.util.Collections; +- +-import org.junit.Test; +- +-import com.sun.org.apache.xerces.internal.impl.xs.models.CMNodeFactory; +-import org.apache.cassandra.SchemaLoader; +-import org.apache.cassandra.config.CFMetaData; +-import org.apache.cassandra.config.ColumnDefinition; +-import org.apache.cassandra.config.Schema; +-import org.apache.cassandra.db.marshal.BytesType; +-import org.apache.cassandra.db.marshal.Int32Type; +-import org.apache.cassandra.db.marshal.UTF8Type; +-import org.apache.cassandra.schema.KeyspaceParams; +-import org.apache.cassandra.service.MigrationManager; +-import org.apache.cassandra.thrift.CfDef; +-import org.apache.cassandra.thrift.ColumnDef; +-import org.apache.cassandra.thrift.ThriftConversion; +-import org.apache.cassandra.utils.ByteBufferUtil; +- +-import static junit.framework.Assert.assertFalse; +-import static junit.framework.Assert.assertTrue; +-import static org.junit.Assert.assertEquals; +-import static org.apache.cassandra.utils.ByteBufferUtil.bytes; +- +-public class ThriftCompatibilityTest extends SchemaLoader +-{ +- @Test // test for CASSANDRA-8178 +- public void testNonTextComparator() throws Throwable +- { +- ColumnDef column = new ColumnDef(); +- column.setName(bytes(42)) +- .setValidation_class(UTF8Type.instance.toString()); +- +- CfDef cf = new CfDef("thriftcompat", "JdbcInteger"); +- cf.setColumn_type("Standard") +- .setComparator_type(Int32Type.instance.toString()) +- .setDefault_validation_class(UTF8Type.instance.toString()) +- .setKey_validation_class(BytesType.instance.toString()) +- .setColumn_metadata(Collections.singletonList(column)); +- +- SchemaLoader.createKeyspace("thriftcompat", KeyspaceParams.simple(1), ThriftConversion.fromThrift(cf)); +- +- // the comparator is IntegerType, and there is a column named 42 with a UTF8Type validation type +- execute("INSERT INTO \"thriftcompat\".\"JdbcInteger\" (key, \"42\") VALUES 
(0x00000001, 'abc')"); +- execute("UPDATE \"thriftcompat\".\"JdbcInteger\" SET \"42\" = 'abc' WHERE key = 0x00000001"); +- execute("DELETE \"42\" FROM \"thriftcompat\".\"JdbcInteger\" WHERE key = 0x00000000"); +- UntypedResultSet results = execute("SELECT key, \"42\" FROM \"thriftcompat\".\"JdbcInteger\""); +- assertEquals(1, results.size()); +- UntypedResultSet.Row row = results.iterator().next(); +- assertEquals(ByteBufferUtil.bytes(1), row.getBytes("key")); +- assertEquals("abc", row.getString("42")); +- } +- +- @Test // test for CASSANDRA-9867 +- public void testDropCompactStaticColumn() +- { +- ColumnDef column1 = new ColumnDef(); +- column1.setName(bytes(42)) +- .setValidation_class(UTF8Type.instance.toString()); +- +- ColumnDef column2 = new ColumnDef(); +- column2.setName(bytes(25)) +- .setValidation_class(UTF8Type.instance.toString()); +- +- CfDef cf = new CfDef("thriftks", "staticcompact"); +- cf.setColumn_type("Standard") +- .setComparator_type(Int32Type.instance.toString()) +- .setDefault_validation_class(UTF8Type.instance.toString()) +- .setKey_validation_class(BytesType.instance.toString()) +- .setColumn_metadata(Arrays.asList(column1, column2)); +- +- SchemaLoader.createKeyspace("thriftks", KeyspaceParams.simple(1), ThriftConversion.fromThrift(cf)); +- CFMetaData cfm = Schema.instance.getCFMetaData("thriftks", "staticcompact"); +- +- // assert the both columns are in the metadata +- assertTrue(cfm.getColumnMetadata().containsKey(bytes(42))); +- assertTrue(cfm.getColumnMetadata().containsKey(bytes(25))); +- +- // remove column2 +- cf.setColumn_metadata(Collections.singletonList(column1)); +- MigrationManager.announceColumnFamilyUpdate(ThriftConversion.fromThriftForUpdate(cf, cfm), true); +- +- // assert that it's gone from metadata +- assertTrue(cfm.getColumnMetadata().containsKey(bytes(42))); +- assertFalse(cfm.getColumnMetadata().containsKey(bytes(25))); +- } +- +- private static UntypedResultSet execute(String query) +- { +- return 
QueryProcessor.executeInternal(query); +- } +-} +diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java +index 2072bf2..2f3bf27 100644 +--- a/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java ++++ b/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java +@@ -1012,16 +1012,13 @@ public class SecondaryIndexTest extends CQLTester + { + createTable("CREATE TABLE %s (a int, b int, c int, PRIMARY KEY ((a), b))"); + createIndex("CREATE INDEX c_idx ON %s(c)"); +- MD5Digest cqlId = prepareStatement("SELECT * FROM %s.%s WHERE c=?", false).statementId; +- Integer thriftId = prepareStatement("SELECT * FROM %s.%s WHERE c=?", true).toThriftPreparedResult().getItemId(); ++ MD5Digest cqlId = prepareStatement("SELECT * FROM %s.%s WHERE c=?").statementId; + + assertNotNull(QueryProcessor.instance.getPrepared(cqlId)); +- assertNotNull(QueryProcessor.instance.getPreparedForThrift(thriftId)); + + dropIndex("DROP INDEX %s.c_idx"); + + assertNull(QueryProcessor.instance.getPrepared(cqlId)); +- assertNull(QueryProcessor.instance.getPreparedForThrift(thriftId)); + } + + // See CASSANDRA-11021 +@@ -1177,11 +1174,10 @@ public class SecondaryIndexTest extends CQLTester + row(bytes("foo124"), EMPTY_BYTE_BUFFER)); + } + +- private ResultMessage.Prepared prepareStatement(String cql, boolean forThrift) ++ private ResultMessage.Prepared prepareStatement(String cql) + { + return QueryProcessor.prepare(String.format(cql, KEYSPACE, currentTable()), +- ClientState.forInternalCalls(), +- forThrift); ++ ClientState.forInternalCalls()); + } + + private void validateCell(Cell cell, ColumnDefinition def, ByteBuffer val, long timestamp) +diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/UFTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/UFTest.java +index e7c46a5..0d67b9f 100644 +--- 
a/test/unit/org/apache/cassandra/cql3/validation/entities/UFTest.java ++++ b/test/unit/org/apache/cassandra/cql3/validation/entities/UFTest.java +@@ -173,16 +173,16 @@ public class UFTest extends CQLTester + + ResultMessage.Prepared preparedSelect1 = QueryProcessor.prepare( + String.format("SELECT key, %s(d) FROM %s.%s", fSin, KEYSPACE, currentTable()), +- ClientState.forInternalCalls(), false); ++ ClientState.forInternalCalls()); + ResultMessage.Prepared preparedSelect2 = QueryProcessor.prepare( + String.format("SELECT key FROM %s.%s", KEYSPACE, currentTable()), +- ClientState.forInternalCalls(), false); ++ ClientState.forInternalCalls()); + ResultMessage.Prepared preparedInsert1 = QueryProcessor.prepare( + String.format("INSERT INTO %s.%s (key, d) VALUES (?, %s(?))", KEYSPACE, currentTable(), fSin), +- ClientState.forInternalCalls(), false); ++ ClientState.forInternalCalls()); + ResultMessage.Prepared preparedInsert2 = QueryProcessor.prepare( + String.format("INSERT INTO %s.%s (key, d) VALUES (?, ?)", KEYSPACE, currentTable()), +- ClientState.forInternalCalls(), false); ++ ClientState.forInternalCalls()); + + Assert.assertNotNull(QueryProcessor.instance.getPrepared(preparedSelect1.statementId)); + Assert.assertNotNull(QueryProcessor.instance.getPrepared(preparedSelect2.statementId)); +@@ -207,10 +207,10 @@ public class UFTest extends CQLTester + + preparedSelect1= QueryProcessor.prepare( + String.format("SELECT key, %s(d) FROM %s.%s", fSin, KEYSPACE, currentTable()), +- ClientState.forInternalCalls(), false); ++ ClientState.forInternalCalls()); + preparedInsert1 = QueryProcessor.prepare( + String.format("INSERT INTO %s.%s (key, d) VALUES (?, %s(?))", KEYSPACE, currentTable(), fSin), +- ClientState.forInternalCalls(), false); ++ ClientState.forInternalCalls()); + Assert.assertNotNull(QueryProcessor.instance.getPrepared(preparedSelect1.statementId)); + Assert.assertNotNull(QueryProcessor.instance.getPrepared(preparedInsert1.statementId)); + +@@ -276,7 +276,7 @@ 
public class UFTest extends CQLTester + KEYSPACE, + currentTable(), + literalArgs), +- ClientState.forInternalCalls(), false); ++ ClientState.forInternalCalls()); + Assert.assertNotNull(QueryProcessor.instance.getPrepared(prepared.statementId)); + return prepared; + } +@@ -292,7 +292,7 @@ public class UFTest extends CQLTester + KEYSPACE, + currentTable(), + function), +- ClientState.forInternalCalls(), false); ++ ClientState.forInternalCalls()); + Assert.assertNotNull(QueryProcessor.instance.getPrepared(prepared.statementId)); + return prepared; + } +@@ -307,7 +307,7 @@ public class UFTest extends CQLTester + String.format("INSERT INTO %s.%s (key, val) VALUES (?, ?)", + KEYSPACE, + currentTable()), +- ClientState.forInternalCalls(), false); ++ ClientState.forInternalCalls()); + Assert.assertNotNull(QueryProcessor.instance.getPrepared(control.statementId)); + + // a function that we'll drop and verify that statements which use it to +@@ -1361,7 +1361,7 @@ public class UFTest extends CQLTester + Assert.assertEquals(1, Schema.instance.getFunctions(fNameName).size()); + + ResultMessage.Prepared prepared = QueryProcessor.prepare(String.format("SELECT key, %s(udt) FROM %s.%s", fName, KEYSPACE, currentTable()), +- ClientState.forInternalCalls(), false); ++ ClientState.forInternalCalls()); + Assert.assertNotNull(QueryProcessor.instance.getPrepared(prepared.statementId)); + + // UT still referenced by table +diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/AggregationTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/AggregationTest.java +index 24a9528..ee21bc1 100644 +--- a/test/unit/org/apache/cassandra/cql3/validation/operations/AggregationTest.java ++++ b/test/unit/org/apache/cassandra/cql3/validation/operations/AggregationTest.java +@@ -1074,7 +1074,7 @@ public class AggregationTest extends CQLTester + "SFUNC " + shortFunctionName(fState) + " " + + "STYPE int"); + +- ResultMessage.Prepared prepared = 
QueryProcessor.prepare("SELECT " + a + "(b) FROM " + otherKS + ".jsdp", ClientState.forInternalCalls(), false); ++ ResultMessage.Prepared prepared = QueryProcessor.prepare("SELECT " + a + "(b) FROM " + otherKS + ".jsdp", ClientState.forInternalCalls()); + assertNotNull(QueryProcessor.instance.getPrepared(prepared.statementId)); + + execute("DROP AGGREGATE " + a + "(int)"); +@@ -1086,7 +1086,7 @@ public class AggregationTest extends CQLTester + "SFUNC " + shortFunctionName(fState) + " " + + "STYPE int"); + +- prepared = QueryProcessor.prepare("SELECT " + a + "(b) FROM " + otherKS + ".jsdp", ClientState.forInternalCalls(), false); ++ prepared = QueryProcessor.prepare("SELECT " + a + "(b) FROM " + otherKS + ".jsdp", ClientState.forInternalCalls()); + assertNotNull(QueryProcessor.instance.getPrepared(prepared.statementId)); + + execute("DROP KEYSPACE " + otherKS + ";"); +diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/CreateTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/CreateTest.java +index 8f92403..d5ec711 100644 +--- a/test/unit/org/apache/cassandra/cql3/validation/operations/CreateTest.java ++++ b/test/unit/org/apache/cassandra/cql3/validation/operations/CreateTest.java +@@ -43,13 +43,6 @@ import static junit.framework.Assert.fail; + public class CreateTest extends CQLTester + { + @Test +- public void testCQL3PartitionKeyOnlyTable() +- { +- createTable("CREATE TABLE %s (id text PRIMARY KEY);"); +- assertFalse(currentTableMetadata().isThriftCompatible()); +- } +- +- @Test + public void testCreateTableWithSmallintColumns() throws Throwable + { + createTable("CREATE TABLE %s (a text, b smallint, c smallint, primary key (a, b));"); +diff --git a/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java b/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java +index af43152..a2df7c6 100644 +--- a/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java ++++ 
b/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java +@@ -156,93 +156,6 @@ public class ColumnFamilyStoreTest + reTest(cfs, r); + } + +- // TODO: Implement this once we have hooks to super columns available in CQL context +-// @Test +-// public void testDeleteSuperRowSticksAfterFlush() throws Throwable +-// { +-// String keyspaceName = KEYSPACE1; +-// String cfName= CF_SUPER1; +-// +-// Keyspace keyspace = Keyspace.open(keyspaceName); +-// ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName); +-// +-// ByteBuffer scfName = ByteBufferUtil.bytes("SuperDuper"); +-// DecoratedKey key = Util.dk("flush-resurrection"); +-// +-// // create an isolated sstable. +-// putColSuper(cfs, key, 0, ByteBufferUtil.bytes("val"), ByteBufferUtil.bytes(1L), ByteBufferUtil.bytes(1L), ByteBufferUtil.bytes("val1")); +- +-// putColsSuper(cfs, key, scfName, +-// new BufferCell(cellname(1L), ByteBufferUtil.bytes("val1"), 1), +-// new BufferCell(cellname(2L), ByteBufferUtil.bytes("val2"), 1), +-// new BufferCell(cellname(3L), ByteBufferUtil.bytes("val3"), 1)); +-// cfs.forceBlockingFlush(); +-// +-// // insert, don't flush. +-// putColsSuper(cfs, key, scfName, +-// new BufferCell(cellname(4L), ByteBufferUtil.bytes("val4"), 1), +-// new BufferCell(cellname(5L), ByteBufferUtil.bytes("val5"), 1), +-// new BufferCell(cellname(6L), ByteBufferUtil.bytes("val6"), 1)); +-// +-// // verify insert. 
+-// final SlicePredicate sp = new SlicePredicate(); +-// sp.setSlice_range(new SliceRange()); +-// sp.getSlice_range().setCount(100); +-// sp.getSlice_range().setStart(ArrayUtils.EMPTY_BYTE_ARRAY); +-// sp.getSlice_range().setFinish(ArrayUtils.EMPTY_BYTE_ARRAY); +-// +-// assertRowAndColCount(1, 6, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, scfName), 100)); +-// +-// // delete +-// Mutation rm = new Mutation(keyspace.getName(), key.getKey()); +-// rm.deleteRange(cfName, SuperColumns.startOf(scfName), SuperColumns.endOf(scfName), 2); +-// rm.applyUnsafe(); +-// +-// // verify delete. +-// assertRowAndColCount(1, 0, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, scfName), 100)); +-// +-// // flush +-// cfs.forceBlockingFlush(); +-// +-// // re-verify delete. +-// assertRowAndColCount(1, 0, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, scfName), 100)); +-// +-// // late insert. +-// putColsSuper(cfs, key, scfName, +-// new BufferCell(cellname(4L), ByteBufferUtil.bytes("val4"), 1L), +-// new BufferCell(cellname(7L), ByteBufferUtil.bytes("val7"), 1L)); +-// +-// // re-verify delete. +-// assertRowAndColCount(1, 0, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, scfName), 100)); +-// +-// // make sure new writes are recognized. 
+-// putColsSuper(cfs, key, scfName, +-// new BufferCell(cellname(3L), ByteBufferUtil.bytes("val3"), 3), +-// new BufferCell(cellname(8L), ByteBufferUtil.bytes("val8"), 3), +-// new BufferCell(cellname(9L), ByteBufferUtil.bytes("val9"), 3)); +-// assertRowAndColCount(1, 3, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, scfName), 100)); +-// } +- +-// private static void assertRowAndColCount(int rowCount, int colCount, boolean isDeleted, Collection rows) throws CharacterCodingException +-// { +-// assert rows.size() == rowCount : "rowcount " + rows.size(); +-// for (Row row : rows) +-// { +-// assert row.cf != null : "cf was null"; +-// assert row.cf.getColumnCount() == colCount : "colcount " + row.cf.getColumnCount() + "|" + str(row.cf); +-// if (isDeleted) +-// assert row.cf.isMarkedForDelete() : "cf not marked for delete"; +-// } +-// } +-// +-// private static String str(ColumnFamily cf) throws CharacterCodingException +-// { +-// StringBuilder sb = new StringBuilder(); +-// for (Cell col : cf.getSortedColumns()) +-// sb.append(String.format("(%s,%s,%d),", ByteBufferUtil.string(col.name().toByteBuffer()), ByteBufferUtil.string(col.value()), col.timestamp())); +-// return sb.toString(); +-// } +- + @Test + public void testDeleteStandardRowSticksAfterFlush() throws Throwable + { +diff --git a/test/unit/org/apache/cassandra/db/LegacyCellNameTest.java b/test/unit/org/apache/cassandra/db/LegacyCellNameTest.java +deleted file mode 100644 +index fa29b1e..0000000 +--- a/test/unit/org/apache/cassandra/db/LegacyCellNameTest.java ++++ /dev/null +@@ -1,81 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. 
The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. +- */ +-package org.apache.cassandra.db; +- +-import org.junit.Test; +- +-import org.apache.cassandra.config.CFMetaData; +- +-import static junit.framework.Assert.assertTrue; +- +-public class LegacyCellNameTest +-{ +- @Test +- public void testColumnSameNameAsPartitionKeyCompactStorage() throws Exception +- { +- CFMetaData cfm = CFMetaData.compile("CREATE TABLE cs (" + +- "k int PRIMARY KEY, v int)" + +- " WITH COMPACT STORAGE", "ks"); +- +- LegacyLayout.LegacyCellName cellName +- = LegacyLayout.decodeCellName(cfm, +- LegacyLayout.makeLegacyComparator(cfm) +- .fromString("k")); +- +- assertTrue(cellName.column.isRegular()); +- } +- +- @Test +- public void testColumnSameNameAsClusteringKeyCompactStorage() throws Exception +- { +- CFMetaData cfm = CFMetaData.compile("CREATE TABLE cs (" + +- "k int PRIMARY KEY, v int)" + +- " WITH COMPACT STORAGE", "ks"); +- +- LegacyLayout.LegacyCellName cellName +- = LegacyLayout.decodeCellName(cfm, +- LegacyLayout.makeLegacyComparator(cfm) +- .fromString("column1")); +- +- assertTrue(cellName.column.isRegular()); +- } +- +- @Test(expected=IllegalArgumentException.class) +- public void testColumnSameNameAsPartitionKeyCql3() throws Exception +- { +- CFMetaData cfm = CFMetaData.compile("CREATE TABLE cs (" + +- "k int PRIMARY KEY, v int)", "ks"); +- +- LegacyLayout.LegacyCellName cellName +- = LegacyLayout.decodeCellName(cfm, +- 
LegacyLayout.makeLegacyComparator(cfm) +- .fromString("k")); +- } +- +- @Test(expected=IllegalArgumentException.class) +- public void testColumnSameNameAsClusteringKeyCql3() throws Exception +- { +- CFMetaData cfm = CFMetaData.compile("CREATE TABLE cs (" + +- "k int, c text, v int, PRIMARY KEY(k, c))", "ks"); +- +- LegacyLayout.LegacyCellName cellName +- = LegacyLayout.decodeCellName(cfm, +- LegacyLayout.makeLegacyComparator(cfm) +- .fromString("c")); +- } +-} +diff --git a/test/unit/org/apache/cassandra/db/PartitionRangeReadTest.java b/test/unit/org/apache/cassandra/db/PartitionRangeReadTest.java +index d5fb8fa..e409592 100644 +--- a/test/unit/org/apache/cassandra/db/PartitionRangeReadTest.java ++++ b/test/unit/org/apache/cassandra/db/PartitionRangeReadTest.java +@@ -155,211 +155,5 @@ public class PartitionRangeReadTest + assertTrue(partitions.get(0).iterator().next().getCell(cDef).value().equals(ByteBufferUtil.bytes("2"))); + assertTrue(partitions.get(partitions.size() - 1).iterator().next().getCell(cDef).value().equals(ByteBufferUtil.bytes("6"))); + } +- +- // TODO: Port or remove, depending on what DataLimits.thriftLimits (per cell) looks like +-// @Test +-// public void testRangeSliceColumnsLimit() throws Throwable +-// { +-// String keyspaceName = KEYSPACE1; +-// String cfName = CF_STANDARD1; +-// Keyspace keyspace = Keyspace.open(keyspaceName); +-// ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName); +-// cfs.clearUnsafe(); +-// +-// Cell[] cols = new Cell[5]; +-// for (int i = 0; i < 5; i++) +-// cols[i] = column("c" + i, "value", 1); +-// +-// putColsStandard(cfs, Util.dk("a"), cols[0], cols[1], cols[2], cols[3], cols[4]); +-// putColsStandard(cfs, Util.dk("b"), cols[0], cols[1]); +-// putColsStandard(cfs, Util.dk("c"), cols[0], cols[1], cols[2], cols[3]); +-// cfs.forceBlockingFlush(); +-// +-// SlicePredicate sp = new SlicePredicate(); +-// sp.setSlice_range(new SliceRange()); +-// sp.getSlice_range().setCount(1); +-// 
sp.getSlice_range().setStart(ArrayUtils.EMPTY_BYTE_ARRAY); +-// sp.getSlice_range().setFinish(ArrayUtils.EMPTY_BYTE_ARRAY); +-// +-// assertTotalColCount(cfs.getRangeSlice(Util.range("", ""), +-// null, +-// ThriftValidation.asIFilter(sp, cfs.metadata, null), +-// 3, +-// System.currentTimeMillis(), +-// true, +-// false), +-// 3); +-// assertTotalColCount(cfs.getRangeSlice(Util.range("", ""), +-// null, +-// ThriftValidation.asIFilter(sp, cfs.metadata, null), +-// 5, +-// System.currentTimeMillis(), +-// true, +-// false), +-// 5); +-// assertTotalColCount(cfs.getRangeSlice(Util.range("", ""), +-// null, +-// ThriftValidation.asIFilter(sp, cfs.metadata, null), +-// 8, +-// System.currentTimeMillis(), +-// true, +-// false), +-// 8); +-// assertTotalColCount(cfs.getRangeSlice(Util.range("", ""), +-// null, +-// ThriftValidation.asIFilter(sp, cfs.metadata, null), +-// 10, +-// System.currentTimeMillis(), +-// true, +-// false), +-// 10); +-// assertTotalColCount(cfs.getRangeSlice(Util.range("", ""), +-// null, +-// ThriftValidation.asIFilter(sp, cfs.metadata, null), +-// 100, +-// System.currentTimeMillis(), +-// true, +-// false), +-// 11); +-// +-// // Check that when querying by name, we always include all names for a +-// // gien row even if it means returning more columns than requested (this is necesseray for CQL) +-// sp = new SlicePredicate(); +-// sp.setColumn_names(Arrays.asList( +-// ByteBufferUtil.bytes("c0"), +-// ByteBufferUtil.bytes("c1"), +-// ByteBufferUtil.bytes("c2") +-// )); +-// +-// assertTotalColCount(cfs.getRangeSlice(Util.range("", ""), +-// null, +-// ThriftValidation.asIFilter(sp, cfs.metadata, null), +-// 1, +-// System.currentTimeMillis(), +-// true, +-// false), +-// 3); +-// assertTotalColCount(cfs.getRangeSlice(Util.range("", ""), +-// null, +-// ThriftValidation.asIFilter(sp, cfs.metadata, null), +-// 4, +-// System.currentTimeMillis(), +-// true, +-// false), +-// 5); +-// assertTotalColCount(cfs.getRangeSlice(Util.range("", ""), 
+-// null, +-// ThriftValidation.asIFilter(sp, cfs.metadata, null), +-// 5, +-// System.currentTimeMillis(), +-// true, +-// false), +-// 5); +-// assertTotalColCount(cfs.getRangeSlice(Util.range("", ""), +-// null, +-// ThriftValidation.asIFilter(sp, cfs.metadata, null), +-// 6, +-// System.currentTimeMillis(), +-// true, +-// false), +-// 8); +-// assertTotalColCount(cfs.getRangeSlice(Util.range("", ""), +-// null, +-// ThriftValidation.asIFilter(sp, cfs.metadata, null), +-// 100, +-// System.currentTimeMillis(), +-// true, +-// false), +-// 8); +-// } +- +- // TODO: Port or remove, depending on what DataLimits.thriftLimits (per cell) looks like +-// @Test +-// public void testRangeSlicePaging() throws Throwable +-// { +-// String keyspaceName = KEYSPACE1; +-// String cfName = CF_STANDARD1; +-// Keyspace keyspace = Keyspace.open(keyspaceName); +-// ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName); +-// cfs.clearUnsafe(); +-// +-// Cell[] cols = new Cell[4]; +-// for (int i = 0; i < 4; i++) +-// cols[i] = column("c" + i, "value", 1); +-// +-// DecoratedKey ka = Util.dk("a"); +-// DecoratedKey kb = Util.dk("b"); +-// DecoratedKey kc = Util.dk("c"); +-// +-// PartitionPosition min = Util.rp(""); +-// +-// putColsStandard(cfs, ka, cols[0], cols[1], cols[2], cols[3]); +-// putColsStandard(cfs, kb, cols[0], cols[1], cols[2]); +-// putColsStandard(cfs, kc, cols[0], cols[1], cols[2], cols[3]); +-// cfs.forceBlockingFlush(); +-// +-// SlicePredicate sp = new SlicePredicate(); +-// sp.setSlice_range(new SliceRange()); +-// sp.getSlice_range().setCount(1); +-// sp.getSlice_range().setStart(ArrayUtils.EMPTY_BYTE_ARRAY); +-// sp.getSlice_range().setFinish(ArrayUtils.EMPTY_BYTE_ARRAY); +-// +-// Collection rows; +-// Row row, row1, row2; +-// IDiskAtomFilter filter = ThriftValidation.asIFilter(sp, cfs.metadata, null); +-// +-// rows = cfs.getRangeSlice(cfs.makeExtendedFilter(Util.range("", ""), filter, null, 3, true, true, System.currentTimeMillis())); +-// assert 
rows.size() == 1 : "Expected 1 row, got " + toString(rows); +-// row = rows.iterator().next(); +-// assertColumnNames(row, "c0", "c1", "c2"); +-// +-// sp.getSlice_range().setStart(ByteBufferUtil.getArray(ByteBufferUtil.bytes("c2"))); +-// filter = ThriftValidation.asIFilter(sp, cfs.metadata, null); +-// rows = cfs.getRangeSlice(cfs.makeExtendedFilter(new Bounds(ka, min), filter, null, 3, true, true, System.currentTimeMillis())); +-// assert rows.size() == 2 : "Expected 2 rows, got " + toString(rows); +-// Iterator iter = rows.iterator(); +-// row1 = iter.next(); +-// row2 = iter.next(); +-// assertColumnNames(row1, "c2", "c3"); +-// assertColumnNames(row2, "c0"); +-// +-// sp.getSlice_range().setStart(ByteBufferUtil.getArray(ByteBufferUtil.bytes("c0"))); +-// filter = ThriftValidation.asIFilter(sp, cfs.metadata, null); +-// rows = cfs.getRangeSlice(cfs.makeExtendedFilter(new Bounds(row2.key, min), filter, null, 3, true, true, System.currentTimeMillis())); +-// assert rows.size() == 1 : "Expected 1 row, got " + toString(rows); +-// row = rows.iterator().next(); +-// assertColumnNames(row, "c0", "c1", "c2"); +-// +-// sp.getSlice_range().setStart(ByteBufferUtil.getArray(ByteBufferUtil.bytes("c2"))); +-// filter = ThriftValidation.asIFilter(sp, cfs.metadata, null); +-// rows = cfs.getRangeSlice(cfs.makeExtendedFilter(new Bounds(row.key, min), filter, null, 3, true, true, System.currentTimeMillis())); +-// assert rows.size() == 2 : "Expected 2 rows, got " + toString(rows); +-// iter = rows.iterator(); +-// row1 = iter.next(); +-// row2 = iter.next(); +-// assertColumnNames(row1, "c2"); +-// assertColumnNames(row2, "c0", "c1"); +-// +-// // Paging within bounds +-// SliceQueryFilter sf = new SliceQueryFilter(cellname("c1"), +-// cellname("c2"), +-// false, +-// 0); +-// rows = cfs.getRangeSlice(cfs.makeExtendedFilter(new Bounds(ka, kc), sf, cellname("c2"), cellname("c1"), null, 2, true, System.currentTimeMillis())); +-// assert rows.size() == 2 : "Expected 2 rows, got 
" + toString(rows); +-// iter = rows.iterator(); +-// row1 = iter.next(); +-// row2 = iter.next(); +-// assertColumnNames(row1, "c2"); +-// assertColumnNames(row2, "c1"); +-// +-// rows = cfs.getRangeSlice(cfs.makeExtendedFilter(new Bounds(kb, kc), sf, cellname("c1"), cellname("c1"), null, 10, true, System.currentTimeMillis())); +-// assert rows.size() == 2 : "Expected 2 rows, got " + toString(rows); +-// iter = rows.iterator(); +-// row1 = iter.next(); +-// row2 = iter.next(); +-// assertColumnNames(row1, "c1", "c2"); +-// assertColumnNames(row2, "c1"); +-// } + } + +diff --git a/test/unit/org/apache/cassandra/db/ReadResponseTest.java b/test/unit/org/apache/cassandra/db/ReadResponseTest.java +deleted file mode 100644 +index 52ab8bb..0000000 +--- a/test/unit/org/apache/cassandra/db/ReadResponseTest.java ++++ /dev/null +@@ -1,99 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. 
+- */ +-package org.apache.cassandra.db; +- +-import java.util.*; +- +-import org.junit.After; +-import org.junit.Before; +-import org.junit.Test; +- +-import org.apache.cassandra.Util; +-import org.apache.cassandra.config.CFMetaData; +-import org.apache.cassandra.config.DatabaseDescriptor; +-import org.apache.cassandra.cql3.CQLTester; +-import org.apache.cassandra.db.rows.Rows; +-import org.apache.cassandra.db.rows.UnfilteredRowIterators; +-import org.apache.cassandra.db.partitions.ImmutableBTreePartition; +-import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator; +-import org.apache.cassandra.db.marshal.AsciiType; +-import org.apache.cassandra.dht.ByteOrderedPartitioner; +-import org.apache.cassandra.dht.IPartitioner; +- +-import static org.junit.Assert.assertEquals; +- +-public class ReadResponseTest extends CQLTester +-{ +- private IPartitioner partitionerToRestore; +- +- @Before +- public void setupPartitioner() +- { +- // Using an ordered partitioner to be able to predict keys order in the following tests. +- partitionerToRestore = DatabaseDescriptor.setPartitionerUnsafe(ByteOrderedPartitioner.instance); +- } +- +- @After +- public void resetPartitioner() +- { +- DatabaseDescriptor.setPartitionerUnsafe(partitionerToRestore); +- } +- +- @Test +- public void testLegacyResponseSkipWrongBounds() +- { +- createTable("CREATE TABLE %s (k text PRIMARY KEY)"); +- +- ColumnFamilyStore cfs = getCurrentColumnFamilyStore(); +- +- // Test that if a legacy response contains keys at the boundary of the requested key range that shouldn't be present, those +- // are properly skipped. See CASSANDRA-9857 for context. 
+- +- List responses = Arrays.asList(makePartition(cfs.metadata, "k1"), +- makePartition(cfs.metadata, "k2"), +- makePartition(cfs.metadata, "k3")); +- ReadResponse.LegacyRemoteDataResponse response = new ReadResponse.LegacyRemoteDataResponse(responses); +- +- assertPartitions(response.makeIterator(Util.cmd(cfs).fromKeyExcl("k1").toKeyExcl("k3").build()), "k2"); +- assertPartitions(response.makeIterator(Util.cmd(cfs).fromKeyExcl("k0").toKeyExcl("k3").build()), "k1", "k2"); +- assertPartitions(response.makeIterator(Util.cmd(cfs).fromKeyExcl("k1").toKeyExcl("k4").build()), "k2", "k3"); +- +- assertPartitions(response.makeIterator(Util.cmd(cfs).fromKeyIncl("k1").toKeyExcl("k3").build()), "k1", "k2"); +- assertPartitions(response.makeIterator(Util.cmd(cfs).fromKeyIncl("k1").toKeyExcl("k4").build()), "k1", "k2", "k3"); +- } +- +- private void assertPartitions(UnfilteredPartitionIterator actual, String... expectedKeys) +- { +- int i = 0; +- while (i < expectedKeys.length && actual.hasNext()) +- { +- String actualKey = AsciiType.instance.getString(actual.next().partitionKey().getKey()); +- assertEquals(expectedKeys[i++], actualKey); +- } +- +- if (i < expectedKeys.length) +- throw new AssertionError("Got less results than expected: " + expectedKeys[i] + " is not in the result"); +- if (actual.hasNext()) +- throw new AssertionError("Got more results than expected: first unexpected key is " + AsciiType.instance.getString(actual.next().partitionKey().getKey())); +- } +- +- private static ImmutableBTreePartition makePartition(CFMetaData metadata, String key) +- { +- return ImmutableBTreePartition.create(UnfilteredRowIterators.noRowsIterator(metadata, Util.dk(key), Rows.EMPTY_STATIC_ROW, new DeletionTime(0, 0), false)); +- } +-} +diff --git a/test/unit/org/apache/cassandra/db/RowIndexEntryTest.java b/test/unit/org/apache/cassandra/db/RowIndexEntryTest.java +index 73f97fa..516dfce 100644 +--- a/test/unit/org/apache/cassandra/db/RowIndexEntryTest.java ++++ 
b/test/unit/org/apache/cassandra/db/RowIndexEntryTest.java +@@ -142,7 +142,7 @@ public class RowIndexEntryTest extends CQLTester + SerializationHeader header = new SerializationHeader(true, cfMeta, cfMeta.partitionColumns(), EncodingStats.NO_STATS); + + // create C-11206 + old serializer instances +- RowIndexEntry.IndexSerializer rieSerializer = new RowIndexEntry.Serializer(cfMeta, version, header); ++ RowIndexEntry.IndexSerializer rieSerializer = new RowIndexEntry.Serializer(version, header); + Pre_C_11206_RowIndexEntry.Serializer oldSerializer = new Pre_C_11206_RowIndexEntry.Serializer(cfMeta, version, header); + + @SuppressWarnings({ "resource", "IOResourceOpenedButNotSafelyClosed" }) +@@ -257,7 +257,7 @@ public class RowIndexEntryTest extends CQLTester + Collection observers, + Version version) throws IOException + { +- assert !iterator.isEmpty() && version.storeRows(); ++ assert !iterator.isEmpty(); + + Builder builder = new Builder(iterator, output, header, observers, version.correspondingMessagingVersion()); + return builder.build(); +@@ -423,7 +423,7 @@ public class RowIndexEntryTest extends CQLTester + SequentialWriter writer = new SequentialWriter(tempFile); + ColumnIndex columnIndex = RowIndexEntryTest.ColumnIndex.writeAndBuildIndex(partition.unfilteredIterator(), writer, header, Collections.emptySet(), BigFormat.latestVersion); + Pre_C_11206_RowIndexEntry withIndex = Pre_C_11206_RowIndexEntry.create(0xdeadbeef, DeletionTime.LIVE, columnIndex); +- IndexInfo.Serializer indexSerializer = cfs.metadata.serializers().indexInfoSerializer(BigFormat.latestVersion, header); ++ IndexInfo.Serializer indexSerializer = IndexInfo.serializer(BigFormat.latestVersion, header); + + // sanity check + assertTrue(columnIndex.columnsIndex.size() >= 3); +@@ -568,14 +568,12 @@ public class RowIndexEntryTest extends CQLTester + + Serializer(CFMetaData metadata, Version version, SerializationHeader header) + { +- this.idxSerializer = 
metadata.serializers().indexInfoSerializer(version, header); ++ this.idxSerializer = IndexInfo.serializer(version, header); + this.version = version; + } + + public void serialize(Pre_C_11206_RowIndexEntry rie, DataOutputPlus out) throws IOException + { +- assert version.storeRows() : "We read old index files but we should never write them"; +- + out.writeUnsignedVInt(rie.position); + out.writeUnsignedVInt(rie.promotedSize(idxSerializer)); + +@@ -623,35 +621,6 @@ public class RowIndexEntryTest extends CQLTester + + public Pre_C_11206_RowIndexEntry deserialize(DataInputPlus in) throws IOException + { +- if (!version.storeRows()) +- { +- long position = in.readLong(); +- +- int size = in.readInt(); +- if (size > 0) +- { +- DeletionTime deletionTime = DeletionTime.serializer.deserialize(in); +- +- int entries = in.readInt(); +- List columnsIndex = new ArrayList<>(entries); +- +- long headerLength = 0L; +- for (int i = 0; i < entries; i++) +- { +- IndexInfo info = idxSerializer.deserialize(in); +- columnsIndex.add(info); +- if (i == 0) +- headerLength = info.offset; +- } +- +- return new Pre_C_11206_RowIndexEntry.IndexedEntry(position, deletionTime, headerLength, columnsIndex); +- } +- else +- { +- return new Pre_C_11206_RowIndexEntry(position); +- } +- } +- + long position = in.readUnsignedVInt(); + + int size = (int)in.readUnsignedVInt(); +@@ -679,7 +648,7 @@ public class RowIndexEntryTest extends CQLTester + // should be used instead. + static long readPosition(DataInputPlus in, Version version) throws IOException + { +- return version.storeRows() ? in.readUnsignedVInt() : in.readLong(); ++ return in.readUnsignedVInt(); + } + + public static void skip(DataInputPlus in, Version version) throws IOException +@@ -690,7 +659,7 @@ public class RowIndexEntryTest extends CQLTester + + private static void skipPromotedIndex(DataInputPlus in, Version version) throws IOException + { +- int size = version.storeRows() ? 
(int)in.readUnsignedVInt() : in.readInt(); ++ int size = (int)in.readUnsignedVInt(); + if (size <= 0) + return; + +@@ -699,8 +668,6 @@ public class RowIndexEntryTest extends CQLTester + + public int serializedSize(Pre_C_11206_RowIndexEntry rie) + { +- assert version.storeRows() : "We read old index files but we should never write them"; +- + int indexedSize = 0; + if (rie.isIndexed()) + { +diff --git a/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java b/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java +index 45a2c1e..795e411 100644 +--- a/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java ++++ b/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java +@@ -93,61 +93,6 @@ public class SinglePartitionSliceCommandTest + Keyspace.open(KEYSPACE).getColumnFamilyStore(TABLE).truncateBlocking(); + } + +- @Test +- public void staticColumnsAreFiltered() throws IOException +- { +- DecoratedKey key = cfm.decorateKey(ByteBufferUtil.bytes("k")); +- +- UntypedResultSet rows; +- +- QueryProcessor.executeInternal("INSERT INTO ks.tbl (k, s, i, v) VALUES ('k', 's', 0, 'v')"); +- QueryProcessor.executeInternal("DELETE v FROM ks.tbl WHERE k='k' AND i=0"); +- QueryProcessor.executeInternal("DELETE FROM ks.tbl WHERE k='k' AND i=0"); +- rows = QueryProcessor.executeInternal("SELECT * FROM ks.tbl WHERE k='k' AND i=0"); +- +- for (UntypedResultSet.Row row: rows) +- { +- logger.debug("Current: k={}, s={}, v={}", (row.has("k") ? row.getString("k") : null), (row.has("s") ? row.getString("s") : null), (row.has("v") ? 
row.getString("v") : null)); +- } +- +- assert rows.isEmpty(); +- +- ColumnFilter columnFilter = ColumnFilter.selection(PartitionColumns.of(v)); +- ByteBuffer zero = ByteBufferUtil.bytes(0); +- Slices slices = Slices.with(cfm.comparator, Slice.make(ClusteringBound.inclusiveStartOf(zero), ClusteringBound.inclusiveEndOf(zero))); +- ClusteringIndexSliceFilter sliceFilter = new ClusteringIndexSliceFilter(slices, false); +- ReadCommand cmd = new SinglePartitionReadCommand(false, MessagingService.VERSION_30, true, cfm, +- FBUtilities.nowInSeconds(), +- columnFilter, +- RowFilter.NONE, +- DataLimits.NONE, +- key, +- sliceFilter); +- +- DataOutputBuffer out = new DataOutputBuffer((int) ReadCommand.legacyReadCommandSerializer.serializedSize(cmd, MessagingService.VERSION_21)); +- ReadCommand.legacyReadCommandSerializer.serialize(cmd, out, MessagingService.VERSION_21); +- DataInputPlus in = new DataInputBuffer(out.buffer(), true); +- cmd = ReadCommand.legacyReadCommandSerializer.deserialize(in, MessagingService.VERSION_21); +- +- logger.debug("ReadCommand: {}", cmd); +- try (ReadExecutionController controller = cmd.executionController(); +- UnfilteredPartitionIterator partitionIterator = cmd.executeLocally(controller)) +- { +- ReadResponse response = ReadResponse.createDataResponse(partitionIterator, cmd); +- +- logger.debug("creating response: {}", response); +- try (UnfilteredPartitionIterator pIter = response.makeIterator(cmd)) +- { +- assert pIter.hasNext(); +- try (UnfilteredRowIterator partition = pIter.next()) +- { +- LegacyLayout.LegacyUnfilteredPartition rowIter = LegacyLayout.fromUnfilteredRowIterator(cmd, partition); +- Assert.assertEquals(Collections.emptyList(), rowIter.cells); +- } +- } +- } +- } +- + private void checkForS(UnfilteredPartitionIterator pi) + { + Assert.assertTrue(pi.toString(), pi.hasNext()); +@@ -172,7 +117,7 @@ public class SinglePartitionSliceCommandTest + + ColumnFilter columnFilter = ColumnFilter.selection(PartitionColumns.of(s)); + 
ClusteringIndexSliceFilter sliceFilter = new ClusteringIndexSliceFilter(Slices.NONE, false); +- ReadCommand cmd = new SinglePartitionReadCommand(false, MessagingService.VERSION_30, true, cfm, ++ ReadCommand cmd = new SinglePartitionReadCommand(false, MessagingService.VERSION_30, cfm, + FBUtilities.nowInSeconds(), + columnFilter, + RowFilter.NONE, +diff --git a/test/unit/org/apache/cassandra/db/compaction/TTLExpiryTest.java b/test/unit/org/apache/cassandra/db/compaction/TTLExpiryTest.java +index 3dd798d..3d38d3b 100644 +--- a/test/unit/org/apache/cassandra/db/compaction/TTLExpiryTest.java ++++ b/test/unit/org/apache/cassandra/db/compaction/TTLExpiryTest.java +@@ -239,7 +239,7 @@ public class TTLExpiryTest + cfs.enableAutoCompaction(true); + assertEquals(1, cfs.getLiveSSTables().size()); + SSTableReader sstable = cfs.getLiveSSTables().iterator().next(); +- ISSTableScanner scanner = sstable.getScanner(ColumnFilter.all(sstable.metadata), DataRange.allData(cfs.getPartitioner()), false); ++ ISSTableScanner scanner = sstable.getScanner(ColumnFilter.all(sstable.metadata), DataRange.allData(cfs.getPartitioner())); + assertTrue(scanner.hasNext()); + while(scanner.hasNext()) + { +diff --git a/test/unit/org/apache/cassandra/db/composites/CTypeTest.java b/test/unit/org/apache/cassandra/db/composites/CTypeTest.java +index 9b261e6..999417e 100644 +--- a/test/unit/org/apache/cassandra/db/composites/CTypeTest.java ++++ b/test/unit/org/apache/cassandra/db/composites/CTypeTest.java +@@ -30,42 +30,42 @@ public class CTypeTest + { + CompositeType baseType = CompositeType.getInstance(AsciiType.instance, UUIDType.instance, LongType.instance); + +- ByteBuffer a1 = baseType.builder() +- .add(ByteBufferUtil.bytes("a")) +- .add(UUIDType.instance.fromString("00000000-0000-0000-0000-000000000000")) +- .add(ByteBufferUtil.bytes(1)).build(); +- ByteBuffer a2 = baseType.builder() +- .add(ByteBufferUtil.bytes("a")) +- .add(UUIDType.instance.fromString("00000000-0000-0000-0000-000000000000")) +- 
.add(ByteBufferUtil.bytes(100)).build(); +- ByteBuffer b1 = baseType.builder() +- .add(ByteBufferUtil.bytes("a")) +- .add(UUIDType.instance.fromString("ffffffff-ffff-ffff-ffff-ffffffffffff")) +- .add(ByteBufferUtil.bytes(1)).build(); +- ByteBuffer b2 = baseType.builder() +- .add(ByteBufferUtil.bytes("a")) +- .add(UUIDType.instance.fromString("ffffffff-ffff-ffff-ffff-ffffffffffff")) +- .add(ByteBufferUtil.bytes(100)).build(); +- ByteBuffer c1 = baseType.builder() +- .add(ByteBufferUtil.bytes("z")) +- .add(UUIDType.instance.fromString("00000000-0000-0000-0000-000000000000")) +- .add(ByteBufferUtil.bytes(1)).build(); +- ByteBuffer c2 = baseType.builder() +- .add(ByteBufferUtil.bytes("z")) +- .add(UUIDType.instance.fromString("00000000-0000-0000-0000-000000000000")) +- .add(ByteBufferUtil.bytes(100)).build(); +- ByteBuffer d1 = baseType.builder() +- .add(ByteBufferUtil.bytes("z")) +- .add(UUIDType.instance.fromString("ffffffff-ffff-ffff-ffff-ffffffffffff")) +- .add(ByteBufferUtil.bytes(1)).build(); +- ByteBuffer d2 = baseType.builder() +- .add(ByteBufferUtil.bytes("z")) +- .add(UUIDType.instance.fromString("ffffffff-ffff-ffff-ffff-ffffffffffff")) +- .add(ByteBufferUtil.bytes(100)).build(); +- ByteBuffer z1 = baseType.builder() +- .add(ByteBufferUtil.EMPTY_BYTE_BUFFER) +- .add(UUIDType.instance.fromString("ffffffff-ffff-ffff-ffff-ffffffffffff")) +- .add(ByteBufferUtil.bytes(100)).build(); ++ ByteBuffer a1 = CompositeType.build( ++ ByteBufferUtil.bytes("a"), ++ UUIDType.instance.fromString("00000000-0000-0000-0000-000000000000"), ++ ByteBufferUtil.bytes(1)); ++ ByteBuffer a2 = CompositeType.build( ++ ByteBufferUtil.bytes("a"), ++ UUIDType.instance.fromString("00000000-0000-0000-0000-000000000000"), ++ ByteBufferUtil.bytes(100)); ++ ByteBuffer b1 = CompositeType.build( ++ ByteBufferUtil.bytes("a"), ++ UUIDType.instance.fromString("ffffffff-ffff-ffff-ffff-ffffffffffff"), ++ ByteBufferUtil.bytes(1)); ++ ByteBuffer b2 = CompositeType.build( ++ ByteBufferUtil.bytes("a"), ++ 
UUIDType.instance.fromString("ffffffff-ffff-ffff-ffff-ffffffffffff"), ++ ByteBufferUtil.bytes(100)); ++ ByteBuffer c1 = CompositeType.build( ++ ByteBufferUtil.bytes("z"), ++ UUIDType.instance.fromString("00000000-0000-0000-0000-000000000000"), ++ ByteBufferUtil.bytes(1)); ++ ByteBuffer c2 = CompositeType.build( ++ ByteBufferUtil.bytes("z"), ++ UUIDType.instance.fromString("00000000-0000-0000-0000-000000000000"), ++ ByteBufferUtil.bytes(100)); ++ ByteBuffer d1 = CompositeType.build( ++ ByteBufferUtil.bytes("z"), ++ UUIDType.instance.fromString("ffffffff-ffff-ffff-ffff-ffffffffffff"), ++ ByteBufferUtil.bytes(1)); ++ ByteBuffer d2 = CompositeType.build( ++ ByteBufferUtil.bytes("z"), ++ UUIDType.instance.fromString("ffffffff-ffff-ffff-ffff-ffffffffffff"), ++ ByteBufferUtil.bytes(100)); ++ ByteBuffer z1 = CompositeType.build( ++ ByteBufferUtil.EMPTY_BYTE_BUFFER, ++ UUIDType.instance.fromString("ffffffff-ffff-ffff-ffff-ffffffffffff"), ++ ByteBufferUtil.bytes(100)); + + assert baseType.compare(a1,a2) < 0; + assert baseType.compare(a2,b1) < 0; +@@ -105,8 +105,8 @@ public class CTypeTest + public void testSimpleType2() + { + CompositeType baseType = CompositeType.getInstance(UUIDType.instance); +- ByteBuffer a = baseType.builder().add(UUIDType.instance.fromString("00000000-0000-0000-0000-000000000000")).build(); +- ByteBuffer z = baseType.builder().add(UUIDType.instance.fromString("ffffffff-ffff-ffff-ffff-ffffffffffff")).build(); ++ ByteBuffer a = CompositeType.build(UUIDType.instance.fromString("00000000-0000-0000-0000-000000000000")); ++ ByteBuffer z = CompositeType.build(UUIDType.instance.fromString("ffffffff-ffff-ffff-ffff-ffffffffffff")); + + assert baseType.compare(a,z) < 0; + assert baseType.compare(z,a) > 0; +@@ -118,8 +118,8 @@ public class CTypeTest + public void testSimpleType1() + { + CompositeType baseType = CompositeType.getInstance(BytesType.instance); +- ByteBuffer a = baseType.builder().add(ByteBufferUtil.bytes("a")).build(); +- ByteBuffer z = 
baseType.builder().add(ByteBufferUtil.bytes("z")).build(); ++ ByteBuffer a = CompositeType.build(ByteBufferUtil.bytes("a")); ++ ByteBuffer z = CompositeType.build(ByteBufferUtil.bytes("z")); + + assert baseType.compare(a,z) < 0; + assert baseType.compare(z,a) > 0; +diff --git a/test/unit/org/apache/cassandra/db/marshal/CompositeTypeTest.java b/test/unit/org/apache/cassandra/db/marshal/CompositeTypeTest.java +index cc66e71..9b99b79 100644 +--- a/test/unit/org/apache/cassandra/db/marshal/CompositeTypeTest.java ++++ b/test/unit/org/apache/cassandra/db/marshal/CompositeTypeTest.java +@@ -38,6 +38,7 @@ import org.apache.cassandra.exceptions.ConfigurationException; + import org.apache.cassandra.exceptions.SyntaxException; + import org.apache.cassandra.schema.KeyspaceParams; + import org.apache.cassandra.serializers.MarshalException; ++import org.apache.cassandra.serializers.UTF8Serializer; + import org.apache.cassandra.utils.*; + + public class CompositeTypeTest +@@ -263,11 +264,11 @@ public class CompositeTypeTest + + for (String[] input : inputs) + { +- CompositeType.Builder builder = new CompositeType.Builder(comp); +- for (String part : input) +- builder.add(UTF8Type.instance.fromString(part)); ++ ByteBuffer[] bbs = new ByteBuffer[input.length]; ++ for (int i = 0; i < input.length; i++) ++ bbs[i] = UTF8Type.instance.fromString(input[i]); + +- ByteBuffer value = comp.fromString(comp.getString(builder.build())); ++ ByteBuffer value = comp.fromString(comp.getString(CompositeType.build(bbs))); + ByteBuffer[] splitted = comp.split(value); + for (int i = 0; i < splitted.length; i++) + assertEquals(input[i], UTF8Type.instance.getString(splitted[i])); +diff --git a/test/unit/org/apache/cassandra/hadoop/ColumnFamilyInputFormatTest.java b/test/unit/org/apache/cassandra/hadoop/ColumnFamilyInputFormatTest.java +deleted file mode 100644 +index d4261bf..0000000 +--- a/test/unit/org/apache/cassandra/hadoop/ColumnFamilyInputFormatTest.java ++++ /dev/null +@@ -1,52 +0,0 @@ +-package 
org.apache.cassandra.hadoop; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. +- * +- */ +- +- +-import java.nio.ByteBuffer; +-import java.util.ArrayList; +-import java.util.List; +- +-import org.apache.cassandra.thrift.SlicePredicate; +-import org.apache.cassandra.utils.ByteBufferUtil; +-import org.apache.hadoop.conf.Configuration; +-import org.junit.Test; +- +-public class ColumnFamilyInputFormatTest +-{ +- @Test +- public void testSlicePredicate() +- { +- long columnValue = 1271253600000l; +- ByteBuffer columnBytes = ByteBufferUtil.bytes(columnValue); +- +- List columnNames = new ArrayList(); +- columnNames.add(columnBytes); +- SlicePredicate originalPredicate = new SlicePredicate().setColumn_names(columnNames); +- +- Configuration conf = new Configuration(); +- ConfigHelper.setInputSlicePredicate(conf, originalPredicate); +- +- SlicePredicate rtPredicate = ConfigHelper.getInputSlicePredicate(conf); +- assert rtPredicate.column_names.size() == 1; +- assert originalPredicate.column_names.get(0).equals(rtPredicate.column_names.get(0)); +- } +-} +diff --git a/test/unit/org/apache/cassandra/index/sasi/SASIIndexTest.java b/test/unit/org/apache/cassandra/index/sasi/SASIIndexTest.java +index 
46d1a3c..a0c0a93 100644 +--- a/test/unit/org/apache/cassandra/index/sasi/SASIIndexTest.java ++++ b/test/unit/org/apache/cassandra/index/sasi/SASIIndexTest.java +@@ -61,7 +61,6 @@ import org.apache.cassandra.serializers.MarshalException; + import org.apache.cassandra.serializers.TypeSerializer; + import org.apache.cassandra.service.MigrationManager; + import org.apache.cassandra.service.QueryState; +-import org.apache.cassandra.thrift.CqlRow; + import org.apache.cassandra.transport.messages.ResultMessage; + import org.apache.cassandra.utils.ByteBufferUtil; + import org.apache.cassandra.utils.FBUtilities; +@@ -2275,7 +2274,7 @@ public class SASIIndexTest + FBUtilities.nowInSeconds(), + columnFilter, + filter, +- DataLimits.thriftLimits(maxResults, DataLimits.NO_LIMIT), ++ DataLimits.cqlLimits(maxResults), + range, + Optional.empty()); + +@@ -2336,18 +2335,12 @@ public class SASIIndexTest + + private Set executeCQLWithKeys(String rawStatement) throws Exception + { +- SelectStatement statement = (SelectStatement) QueryProcessor.parseStatement(rawStatement).prepare().statement; +- ResultMessage.Rows cqlRows = statement.executeInternal(QueryState.forInternalCalls(), QueryOptions.DEFAULT); + + Set results = new TreeSet<>(); +- for (CqlRow row : cqlRows.toThriftResult().getRows()) ++ for (UntypedResultSet.Row row : QueryProcessor.executeOnceInternal(rawStatement)) + { +- for (org.apache.cassandra.thrift.Column col : row.columns) +- { +- String columnName = UTF8Type.instance.getString(col.bufferForName()); +- if (columnName.equals("id")) +- results.add(AsciiType.instance.getString(col.bufferForValue())); +- } ++ if (row.has("id")) ++ results.add(row.getString("id")); + } + + return results; +diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableCorruptionDetectionTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableCorruptionDetectionTest.java +index 88ed52e..1f2221e 100644 +--- a/test/unit/org/apache/cassandra/io/sstable/SSTableCorruptionDetectionTest.java 
++++ b/test/unit/org/apache/cassandra/io/sstable/SSTableCorruptionDetectionTest.java +@@ -210,7 +210,7 @@ public class SSTableCorruptionDetectionTest extends SSTableWriterTestBase + for (int i = 0; i < numberOfPks; i++) + { + DecoratedKey dk = Util.dk(String.format("pkvalue_%07d", i)); +- try (UnfilteredRowIterator rowIter = sstable.iterator(dk, Slices.ALL, ColumnFilter.all(cfs.metadata), false, false)) ++ try (UnfilteredRowIterator rowIter = sstable.iterator(dk, Slices.ALL, ColumnFilter.all(cfs.metadata), false)) + { + while (rowIter.hasNext()) + { +diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableScannerTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableScannerTest.java +index d73c278..8c53ffb 100644 +--- a/test/unit/org/apache/cassandra/io/sstable/SSTableScannerTest.java ++++ b/test/unit/org/apache/cassandra/io/sstable/SSTableScannerTest.java +@@ -182,7 +182,7 @@ public class SSTableScannerTest + assert boundaries.length % 2 == 0; + for (DataRange range : dataRanges(sstable.metadata, scanStart, scanEnd)) + { +- try(ISSTableScanner scanner = sstable.getScanner(ColumnFilter.all(sstable.metadata), range, false)) ++ try(ISSTableScanner scanner = sstable.getScanner(ColumnFilter.all(sstable.metadata), range)) + { + for (int b = 0; b < boundaries.length; b += 2) + for (int i = boundaries[b]; i <= boundaries[b + 1]; i++) +diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTest.java +index 91843d9..9e88720 100644 +--- a/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTest.java ++++ b/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTest.java +@@ -223,7 +223,7 @@ public class SSTableWriterTest extends SSTableWriterTestBase + try + { + DecoratedKey dk = Util.dk("large_value"); +- UnfilteredRowIterator rowIter = sstable.iterator(dk, Slices.ALL, ColumnFilter.all(cfs.metadata), false, false); ++ UnfilteredRowIterator rowIter = sstable.iterator(dk, 
Slices.ALL, ColumnFilter.all(cfs.metadata), false); + while (rowIter.hasNext()) + { + rowIter.next(); +diff --git a/test/unit/org/apache/cassandra/io/sstable/format/ClientModeSSTableTest.java b/test/unit/org/apache/cassandra/io/sstable/format/ClientModeSSTableTest.java +deleted file mode 100644 +index 6b0427b..0000000 +--- a/test/unit/org/apache/cassandra/io/sstable/format/ClientModeSSTableTest.java ++++ /dev/null +@@ -1,133 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. 
+- */ +-package org.apache.cassandra.io.sstable.format; +- +-import static org.apache.cassandra.utils.ByteBufferUtil.bytes; +- +-import java.io.File; +-import java.nio.ByteBuffer; +- +-import com.google.common.util.concurrent.Runnables; +-import org.junit.BeforeClass; +-import org.junit.Test; +- +-import org.apache.cassandra.concurrent.ScheduledExecutors; +-import org.apache.cassandra.config.CFMetaData; +-import org.apache.cassandra.config.Config; +-import org.apache.cassandra.db.Slices; +-import org.apache.cassandra.db.filter.ColumnFilter; +-import org.apache.cassandra.db.marshal.BytesType; +-import org.apache.cassandra.db.rows.UnfilteredRowIterator; +-import org.apache.cassandra.dht.ByteOrderedPartitioner; +-import org.apache.cassandra.exceptions.ConfigurationException; +-import org.apache.cassandra.io.sstable.Descriptor; +- +-/** +- * Tests backwards compatibility for SSTables +- */ +-public class ClientModeSSTableTest +-{ +- public static final String LEGACY_SSTABLE_PROP = "legacy-sstable-root"; +- public static final String KSNAME = "Keyspace1"; +- public static final String CFNAME = "Standard1"; +- +- public static File LEGACY_SSTABLE_ROOT; +- +- static CFMetaData metadata; +- +- @BeforeClass +- public static void defineSchema() throws ConfigurationException +- { +- Config.setClientMode(true); +- +- metadata = CFMetaData.Builder.createDense(KSNAME, CFNAME, false, false) +- .addPartitionKey("key", BytesType.instance) +- .addClusteringColumn("column", BytesType.instance) +- .addRegularColumn("value", BytesType.instance) +- .withPartitioner(ByteOrderedPartitioner.instance) +- .build(); +- +- String scp = System.getProperty(LEGACY_SSTABLE_PROP); +- assert scp != null; +- LEGACY_SSTABLE_ROOT = new File(scp).getAbsoluteFile(); +- assert LEGACY_SSTABLE_ROOT.isDirectory(); +- } +- +- /** +- * Get a descriptor for the legacy sstable at the given version. 
+- */ +- protected Descriptor getDescriptor(String ver) +- { +- File directory = new File(LEGACY_SSTABLE_ROOT + File.separator + ver + File.separator + KSNAME); +- return new Descriptor(ver, directory, KSNAME, CFNAME, 0, SSTableFormat.Type.LEGACY); +- } +- +- @Test +- public void testVersions() throws Throwable +- { +- boolean notSkipped = false; +- +- for (File version : LEGACY_SSTABLE_ROOT.listFiles()) +- { +- if (!new File(LEGACY_SSTABLE_ROOT + File.separator + version.getName() + File.separator + KSNAME).isDirectory()) +- continue; +- if (Version.validate(version.getName()) && SSTableFormat.Type.LEGACY.info.getVersion(version.getName()).isCompatible()) +- { +- notSkipped = true; +- testVersion(version.getName()); +- } +- } +- +- assert notSkipped; +- } +- +- public void testVersion(String version) throws Throwable +- { +- SSTableReader reader = null; +- try +- { +- reader = SSTableReader.openNoValidation(getDescriptor(version), metadata); +- +- ByteBuffer key = bytes(Integer.toString(100)); +- +- try (UnfilteredRowIterator iter = reader.iterator(metadata.decorateKey(key), Slices.ALL, ColumnFilter.selection(metadata.partitionColumns()), false, false)) +- { +- assert iter.next().clustering().get(0).equals(key); +- } +- } +- catch (Throwable e) +- { +- System.err.println("Failed to read " + version); +- throw e; +- } +- finally +- { +- if (reader != null) +- { +- int globalTidyCount = SSTableReader.GlobalTidy.lookup.size(); +- reader.selfRef().release(); +- assert reader.selfRef().globalCount() == 0; +- +- // await clean-up to complete if started. +- ScheduledExecutors.nonPeriodicTasks.submit(Runnables.doNothing()).get(); +- // Ensure clean-up completed. 
+- assert SSTableReader.GlobalTidy.lookup.size() < globalTidyCount; +- } +- } +- } +-} +diff --git a/test/unit/org/apache/cassandra/schema/LegacySchemaMigratorTest.java b/test/unit/org/apache/cassandra/schema/LegacySchemaMigratorTest.java +deleted file mode 100644 +index 72441cd..0000000 +--- a/test/unit/org/apache/cassandra/schema/LegacySchemaMigratorTest.java ++++ /dev/null +@@ -1,843 +0,0 @@ +-/* +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. 
+- */ +-package org.apache.cassandra.schema; +- +-import java.io.IOException; +-import java.nio.ByteBuffer; +-import java.util.*; +-import java.util.stream.Collectors; +- +-import com.google.common.collect.ImmutableList; +-import org.junit.Test; +- +-import org.apache.cassandra.SchemaLoader; +-import org.apache.cassandra.config.CFMetaData; +-import org.apache.cassandra.config.ColumnDefinition; +-import org.apache.cassandra.config.Schema; +-import org.apache.cassandra.cql3.CQLTester; +-import org.apache.cassandra.cql3.ColumnIdentifier; +-import org.apache.cassandra.cql3.FieldIdentifier; +-import org.apache.cassandra.cql3.functions.*; +-import org.apache.cassandra.db.*; +-import org.apache.cassandra.db.rows.Row; +-import org.apache.cassandra.db.marshal.*; +-import org.apache.cassandra.index.TargetParser; +-import org.apache.cassandra.thrift.ThriftConversion; +- +-import static java.lang.String.format; +-import static junit.framework.Assert.assertEquals; +-import static junit.framework.Assert.assertFalse; +-import static junit.framework.Assert.assertTrue; +-import static org.apache.cassandra.cql3.QueryProcessor.executeOnceInternal; +-import static org.apache.cassandra.utils.ByteBufferUtil.bytes; +-import static org.apache.cassandra.utils.FBUtilities.json; +- +-@SuppressWarnings("deprecation") +-public class LegacySchemaMigratorTest +-{ +- private static final long TIMESTAMP = 1435908994000000L; +- +- private static final String KEYSPACE_PREFIX = "LegacySchemaMigratorTest"; +- +- /* +- * 1. Write a variety of different keyspaces/tables/types/function in the legacy manner, using legacy schema tables +- * 2. Run the migrator +- * 3. Read all the keyspaces from the new schema tables +- * 4. Make sure that we've read *exactly* the same set of keyspaces/tables/types/functions +- * 5. 
Validate that the legacy schema tables are now empty +- */ +- @Test +- public void testMigrate() throws IOException +- { +- CQLTester.cleanupAndLeaveDirs(); +- +- Keyspaces expected = keyspacesToMigrate(); +- +- // write the keyspaces into the legacy tables +- expected.forEach(LegacySchemaMigratorTest::legacySerializeKeyspace); +- +- // run the migration +- LegacySchemaMigrator.migrate(); +- +- // read back all the metadata from the new schema tables +- Keyspaces actual = SchemaKeyspace.fetchNonSystemKeyspaces(); +- +- // need to load back CFMetaData of those tables (CFS instances will still be loaded) +- loadLegacySchemaTables(); +- +- // verify that nothing's left in the old schema tables +- for (CFMetaData table : LegacySchemaMigrator.LegacySchemaTables) +- { +- String query = format("SELECT * FROM %s.%s", SystemKeyspace.NAME, table.cfName); +- //noinspection ConstantConditions +- assertTrue(executeOnceInternal(query).isEmpty()); +- } +- +- // make sure that we've read *exactly* the same set of keyspaces/tables/types/functions +- assertEquals(expected.diff(actual).toString(), expected, actual); +- +- // check that the build status of all indexes has been updated to use the new +- // format of index name: the index_name column of system.IndexInfo used to +- // contain table_name.index_name. Now it should contain just the index_name. 
+- expected.forEach(LegacySchemaMigratorTest::verifyIndexBuildStatus); +- } +- +- private static FieldIdentifier field(String field) +- { +- return FieldIdentifier.forQuoted(field); +- } +- +- private static void loadLegacySchemaTables() +- { +- KeyspaceMetadata systemKeyspace = Schema.instance.getKSMetaData(SystemKeyspace.NAME); +- +- Tables systemTables = systemKeyspace.tables; +- for (CFMetaData table : LegacySchemaMigrator.LegacySchemaTables) +- systemTables = systemTables.with(table); +- +- LegacySchemaMigrator.LegacySchemaTables.forEach(Schema.instance::load); +- +- Schema.instance.setKeyspaceMetadata(systemKeyspace.withSwapped(systemTables)); +- } +- +- private static Keyspaces keyspacesToMigrate() +- { +- Keyspaces.Builder keyspaces = Keyspaces.builder(); +- +- // A whole bucket of shorthand +- String ks1 = KEYSPACE_PREFIX + "Keyspace1"; +- String ks2 = KEYSPACE_PREFIX + "Keyspace2"; +- String ks3 = KEYSPACE_PREFIX + "Keyspace3"; +- String ks4 = KEYSPACE_PREFIX + "Keyspace4"; +- String ks5 = KEYSPACE_PREFIX + "Keyspace5"; +- String ks6 = KEYSPACE_PREFIX + "Keyspace6"; +- String ks_rcs = KEYSPACE_PREFIX + "RowCacheSpace"; +- String ks_nocommit = KEYSPACE_PREFIX + "NoCommitlogSpace"; +- String ks_prsi = KEYSPACE_PREFIX + "PerRowSecondaryIndex"; +- String ks_cql = KEYSPACE_PREFIX + "cql_keyspace"; +- +- // Make it easy to test compaction +- Map compactionOptions = new HashMap<>(); +- compactionOptions.put("tombstone_compaction_interval", "1"); +- +- Map leveledOptions = new HashMap<>(); +- leveledOptions.put("sstable_size_in_mb", "1"); +- +- keyspaces.add(KeyspaceMetadata.create(ks1, +- KeyspaceParams.simple(1), +- Tables.of(SchemaLoader.standardCFMD(ks1, "Standard1") +- .compaction(CompactionParams.scts(compactionOptions)), +- SchemaLoader.standardCFMD(ks1, "StandardGCGS0").gcGraceSeconds(0), +- SchemaLoader.standardCFMD(ks1, "StandardLong1"), +- SchemaLoader.superCFMD(ks1, "Super1", LongType.instance), +- SchemaLoader.superCFMD(ks1, "Super2", 
UTF8Type.instance), +- SchemaLoader.superCFMD(ks1, "Super5", BytesType.instance), +- SchemaLoader.superCFMD(ks1, "Super6", LexicalUUIDType.instance, UTF8Type.instance), +- SchemaLoader.keysIndexCFMD(ks1, "Indexed1", true), +- SchemaLoader.keysIndexCFMD(ks1, "Indexed2", false), +- SchemaLoader.superCFMD(ks1, "SuperDirectGC", BytesType.instance) +- .gcGraceSeconds(0), +- SchemaLoader.jdbcCFMD(ks1, "JdbcUtf8", UTF8Type.instance) +- .addColumnDefinition(SchemaLoader.utf8Column(ks1, "JdbcUtf8")), +- SchemaLoader.jdbcCFMD(ks1, "JdbcLong", LongType.instance), +- SchemaLoader.jdbcCFMD(ks1, "JdbcBytes", BytesType.instance), +- SchemaLoader.jdbcCFMD(ks1, "JdbcAscii", AsciiType.instance), +- SchemaLoader.standardCFMD(ks1, "StandardLeveled") +- .compaction(CompactionParams.lcs(leveledOptions)), +- SchemaLoader.standardCFMD(ks1, "legacyleveled") +- .compaction(CompactionParams.lcs(leveledOptions)), +- SchemaLoader.standardCFMD(ks1, "StandardLowIndexInterval") +- .minIndexInterval(8) +- .maxIndexInterval(256) +- .caching(CachingParams.CACHE_NOTHING)))); +- +- // Keyspace 2 +- keyspaces.add(KeyspaceMetadata.create(ks2, +- KeyspaceParams.simple(1), +- Tables.of(SchemaLoader.standardCFMD(ks2, "Standard1"), +- SchemaLoader.superCFMD(ks2, "Super3", BytesType.instance), +- SchemaLoader.superCFMD(ks2, "Super4", TimeUUIDType.instance), +- SchemaLoader.keysIndexCFMD(ks2, "Indexed1", true), +- SchemaLoader.compositeIndexCFMD(ks2, "Indexed2", true), +- SchemaLoader.compositeIndexCFMD(ks2, "Indexed3", true) +- .gcGraceSeconds(0)))); +- +- // Keyspace 3 +- keyspaces.add(KeyspaceMetadata.create(ks3, +- KeyspaceParams.simple(5), +- Tables.of(SchemaLoader.standardCFMD(ks3, "Standard1"), +- SchemaLoader.keysIndexCFMD(ks3, "Indexed1", true)))); +- +- // Keyspace 4 +- keyspaces.add(KeyspaceMetadata.create(ks4, +- KeyspaceParams.simple(3), +- Tables.of(SchemaLoader.standardCFMD(ks4, "Standard1"), +- SchemaLoader.superCFMD(ks4, "Super3", BytesType.instance), +- SchemaLoader.superCFMD(ks4, "Super4", 
TimeUUIDType.instance), +- SchemaLoader.superCFMD(ks4, "Super5", TimeUUIDType.instance, BytesType.instance)))); +- +- // Keyspace 5 +- keyspaces.add(KeyspaceMetadata.create(ks5, +- KeyspaceParams.simple(2), +- Tables.of(SchemaLoader.standardCFMD(ks5, "Standard1")))); +- +- // Keyspace 6 +- keyspaces.add(KeyspaceMetadata.create(ks6, +- KeyspaceParams.simple(1), +- Tables.of(SchemaLoader.keysIndexCFMD(ks6, "Indexed1", true)))); +- +- // RowCacheSpace +- keyspaces.add(KeyspaceMetadata.create(ks_rcs, +- KeyspaceParams.simple(1), +- Tables.of(SchemaLoader.standardCFMD(ks_rcs, "CFWithoutCache") +- .caching(CachingParams.CACHE_NOTHING), +- SchemaLoader.standardCFMD(ks_rcs, "CachedCF") +- .caching(CachingParams.CACHE_EVERYTHING), +- SchemaLoader.standardCFMD(ks_rcs, "CachedIntCF") +- .caching(new CachingParams(true, 100))))); +- +- keyspaces.add(KeyspaceMetadata.create(ks_nocommit, +- KeyspaceParams.simpleTransient(1), +- Tables.of(SchemaLoader.standardCFMD(ks_nocommit, "Standard1")))); +- +- // PerRowSecondaryIndexTest +- keyspaces.add(KeyspaceMetadata.create(ks_prsi, +- KeyspaceParams.simple(1), +- Tables.of(SchemaLoader.perRowIndexedCFMD(ks_prsi, "Indexed1")))); +- +- // CQLKeyspace +- keyspaces.add(KeyspaceMetadata.create(ks_cql, +- KeyspaceParams.simple(1), +- Tables.of(CFMetaData.compile("CREATE TABLE table1 (" +- + "k int PRIMARY KEY," +- + "v1 text," +- + "v2 int" +- + ')', ks_cql), +- +- CFMetaData.compile("CREATE TABLE table2 (" +- + "k text," +- + "c text," +- + "v text," +- + "PRIMARY KEY (k, c))", ks_cql), +- +- CFMetaData.compile("CREATE TABLE foo (" +- + "bar text, " +- + "baz text, " +- + "qux text, " +- + "PRIMARY KEY(bar, baz) ) " +- + "WITH COMPACT STORAGE", ks_cql), +- +- CFMetaData.compile("CREATE TABLE compact_pkonly (" +- + "k int, " +- + "c int, " +- + "PRIMARY KEY (k, c)) " +- + "WITH COMPACT STORAGE", +- ks_cql), +- +- CFMetaData.compile("CREATE TABLE foofoo (" +- + "bar text, " +- + "baz text, " +- + "qux text, " +- + "quz text, " +- + "foo text, 
" +- + "PRIMARY KEY((bar, baz), qux, quz) ) " +- + "WITH COMPACT STORAGE", ks_cql)))); +- +- // NTS keyspace +- keyspaces.add(KeyspaceMetadata.create("nts", KeyspaceParams.nts("dc1", 1, "dc2", 2))); +- +- keyspaces.add(keyspaceWithDroppedCollections()); +- keyspaces.add(keyspaceWithTriggers()); +- keyspaces.add(keyspaceWithUDTs()); +- keyspaces.add(keyspaceWithUDFs()); +- keyspaces.add(keyspaceWithUDFsAndUDTs()); +- keyspaces.add(keyspaceWithUDAs()); +- keyspaces.add(keyspaceWithUDAsAndUDTs()); +- +- return keyspaces.build(); +- } +- +- private static KeyspaceMetadata keyspaceWithDroppedCollections() +- { +- String keyspace = KEYSPACE_PREFIX + "DroppedCollections"; +- +- CFMetaData table = +- CFMetaData.compile("CREATE TABLE dropped_columns (" +- + "foo text," +- + "bar text," +- + "map1 map," +- + "map2 map," +- + "set1 set," +- + "list1 list," +- + "PRIMARY KEY ((foo), bar))", +- keyspace); +- +- String[] collectionColumnNames = { "map1", "map2", "set1", "list1" }; +- for (String name : collectionColumnNames) +- { +- ColumnDefinition column = table.getColumnDefinition(bytes(name)); +- table.recordColumnDrop(column); +- table.removeColumnDefinition(column); +- } +- +- return KeyspaceMetadata.create(keyspace, KeyspaceParams.simple(1), Tables.of(table)); +- } +- +- private static KeyspaceMetadata keyspaceWithTriggers() +- { +- String keyspace = KEYSPACE_PREFIX + "Triggers"; +- +- Triggers.Builder triggers = Triggers.builder(); +- CFMetaData table = SchemaLoader.standardCFMD(keyspace, "WithTriggers"); +- for (int i = 0; i < 10; i++) +- triggers.add(new TriggerMetadata("trigger" + i, "DummyTrigger" + i)); +- table.triggers(triggers.build()); +- +- return KeyspaceMetadata.create(keyspace, KeyspaceParams.simple(1), Tables.of(table)); +- } +- +- private static KeyspaceMetadata keyspaceWithUDTs() +- { +- String keyspace = KEYSPACE_PREFIX + "UDTs"; +- +- UserType udt1 = new UserType(keyspace, +- bytes("udt1"), +- new ArrayList() {{ add(field("col1")); add(field("col2")); 
}}, +- new ArrayList>() {{ add(UTF8Type.instance); add(Int32Type.instance); }}, +- true); +- +- UserType udt2 = new UserType(keyspace, +- bytes("udt2"), +- new ArrayList() {{ add(field("col3")); add(field("col4")); }}, +- new ArrayList>() {{ add(BytesType.instance); add(BooleanType.instance); }}, +- true); +- +- UserType udt3 = new UserType(keyspace, +- bytes("udt3"), +- new ArrayList() {{ add(field("col5")); }}, +- new ArrayList>() {{ add(AsciiType.instance); }}, +- true); +- +- return KeyspaceMetadata.create(keyspace, +- KeyspaceParams.simple(1), +- Tables.none(), +- Views.none(), +- Types.of(udt1, udt2, udt3), +- Functions.none()); +- } +- +- private static KeyspaceMetadata keyspaceWithUDFs() +- { +- String keyspace = KEYSPACE_PREFIX + "UDFs"; +- +- UDFunction udf1 = UDFunction.create(new FunctionName(keyspace, "udf"), +- ImmutableList.of(new ColumnIdentifier("col1", false), new ColumnIdentifier("col2", false)), +- ImmutableList.of(BytesType.instance, Int32Type.instance), +- LongType.instance, +- false, +- "java", +- "return 42L;"); +- +- // an overload with the same name, not a typo +- UDFunction udf2 = UDFunction.create(new FunctionName(keyspace, "udf"), +- ImmutableList.of(new ColumnIdentifier("col3", false), new ColumnIdentifier("col4", false)), +- ImmutableList.of(AsciiType.instance, LongType.instance), +- Int32Type.instance, +- true, +- "java", +- "return 42;"); +- +- UDFunction udf3 = UDFunction.create(new FunctionName(keyspace, "udf3"), +- ImmutableList.of(new ColumnIdentifier("col4", false)), +- ImmutableList.of(UTF8Type.instance), +- BooleanType.instance, +- false, +- "java", +- "return true;"); +- +- return KeyspaceMetadata.create(keyspace, +- KeyspaceParams.simple(1), +- Tables.none(), +- Views.none(), +- Types.none(), +- Functions.of(udf1, udf2, udf3)); +- } +- +- private static KeyspaceMetadata keyspaceWithUDAs() +- { +- String keyspace = KEYSPACE_PREFIX + "UDAs"; +- +- UDFunction udf1 = UDFunction.create(new FunctionName(keyspace, "udf1"), +- 
ImmutableList.of(new ColumnIdentifier("col1", false), new ColumnIdentifier("col2", false)), +- ImmutableList.of(Int32Type.instance, Int32Type.instance), +- Int32Type.instance, +- false, +- "java", +- "return 42;"); +- +- UDFunction udf2 = UDFunction.create(new FunctionName(keyspace, "udf2"), +- ImmutableList.of(new ColumnIdentifier("col1", false), new ColumnIdentifier("col2", false)), +- ImmutableList.of(LongType.instance, Int32Type.instance), +- LongType.instance, +- false, +- "java", +- "return 42L;"); +- +- UDFunction udf3 = UDFunction.create(new FunctionName(keyspace, "udf3"), +- ImmutableList.of(new ColumnIdentifier("col1", false)), +- ImmutableList.of(LongType.instance), +- DoubleType.instance, +- false, +- "java", +- "return 42d;"); +- +- Functions udfs = Functions.builder().add(udf1).add(udf2).add(udf3).build(); +- +- UDAggregate uda1 = UDAggregate.create(udfs, new FunctionName(keyspace, "uda1"), +- ImmutableList.of(udf1.argTypes().get(1)), +- udf1.returnType(), +- udf1.name(), +- null, +- udf1.argTypes().get(0), +- null +- ); +- +- UDAggregate uda2 = UDAggregate.create(udfs, new FunctionName(keyspace, "uda2"), +- ImmutableList.of(udf2.argTypes().get(1)), +- udf3.returnType(), +- udf2.name(), +- udf3.name(), +- udf2.argTypes().get(0), +- LongType.instance.decompose(0L) +- ); +- +- return KeyspaceMetadata.create(keyspace, +- KeyspaceParams.simple(1), +- Tables.none(), +- Views.none(), +- Types.none(), +- Functions.of(udf1, udf2, udf3, uda1, uda2)); +- } +- +- private static KeyspaceMetadata keyspaceWithUDFsAndUDTs() +- { +- String keyspace = KEYSPACE_PREFIX + "UDFUDTs"; +- +- UserType udt1 = new UserType(keyspace, +- bytes("udt1"), +- new ArrayList() {{ add(field("col1")); add(field("col2")); }}, +- new ArrayList>() {{ add(UTF8Type.instance); add(Int32Type.instance); }}, +- true); +- +- UserType udt2 = new UserType(keyspace, +- bytes("udt2"), +- new ArrayList() {{ add(field("col1")); add(field("col2")); }}, +- new ArrayList>() {{ 
add(ListType.getInstance(udt1, false)); add(Int32Type.instance); }}, +- true); +- +- UDFunction udf1 = UDFunction.create(new FunctionName(keyspace, "udf"), +- ImmutableList.of(new ColumnIdentifier("col1", false), new ColumnIdentifier("col2", false)), +- ImmutableList.of(udt1, udt2), +- LongType.instance, +- false, +- "java", +- "return 42L;"); +- +- // an overload with the same name, not a typo +- UDFunction udf2 = UDFunction.create(new FunctionName(keyspace, "udf"), +- ImmutableList.of(new ColumnIdentifier("col3", false), new ColumnIdentifier("col4", false)), +- ImmutableList.of(AsciiType.instance, LongType.instance), +- Int32Type.instance, +- true, +- "java", +- "return 42;"); +- +- UDFunction udf3 = UDFunction.create(new FunctionName(keyspace, "udf3"), +- ImmutableList.of(new ColumnIdentifier("col4", false)), +- ImmutableList.of(new TupleType(Arrays.asList(udt1, udt2))), +- BooleanType.instance, +- false, +- "java", +- "return true;"); +- +- return KeyspaceMetadata.create(keyspace, +- KeyspaceParams.simple(1), +- Tables.none(), +- Views.none(), +- Types.of(udt1, udt2), +- Functions.of(udf1, udf2, udf3)); +- } +- +- private static KeyspaceMetadata keyspaceWithUDAsAndUDTs() +- { +- String keyspace = KEYSPACE_PREFIX + "UDAUDTs"; +- +- UserType udt1 = new UserType(keyspace, +- bytes("udt1"), +- new ArrayList() {{ add(field("col1")); add(field("col2")); }}, +- new ArrayList>() {{ add(UTF8Type.instance); add(Int32Type.instance); }}, +- true); +- +- UserType udt2 = new UserType(keyspace, +- bytes("udt2"), +- new ArrayList() {{ add(field("col1")); add(field("col2")); }}, +- new ArrayList>() {{ add(ListType.getInstance(udt1, false)); add(Int32Type.instance); }}, +- true); +- +- UDFunction udf1 = UDFunction.create(new FunctionName(keyspace, "udf1"), +- ImmutableList.of(new ColumnIdentifier("col1", false), new ColumnIdentifier("col2", false)), +- ImmutableList.of(udt1, udt2), +- udt1, +- false, +- "java", +- "return null;"); +- +- UDFunction udf2 = UDFunction.create(new 
FunctionName(keyspace, "udf2"), +- ImmutableList.of(new ColumnIdentifier("col1", false), new ColumnIdentifier("col2", false)), +- ImmutableList.of(udt2, udt1), +- udt2, +- false, +- "java", +- "return null;"); +- +- UDFunction udf3 = UDFunction.create(new FunctionName(keyspace, "udf3"), +- ImmutableList.of(new ColumnIdentifier("col1", false)), +- ImmutableList.of(udt2), +- DoubleType.instance, +- false, +- "java", +- "return 42d;"); +- +- Functions udfs = Functions.builder().add(udf1).add(udf2).add(udf3).build(); +- +- UDAggregate uda1 = UDAggregate.create(udfs, new FunctionName(keyspace, "uda1"), +- ImmutableList.of(udf1.argTypes().get(1)), +- udf1.returnType(), +- udf1.name(), +- null, +- udf1.argTypes().get(0), +- null +- ); +- +- ByteBuffer twoNullEntries = ByteBuffer.allocate(8); +- twoNullEntries.putInt(-1); +- twoNullEntries.putInt(-1); +- twoNullEntries.flip(); +- UDAggregate uda2 = UDAggregate.create(udfs, new FunctionName(keyspace, "uda2"), +- ImmutableList.of(udf2.argTypes().get(1)), +- udf3.returnType(), +- udf2.name(), +- udf3.name(), +- udf2.argTypes().get(0), +- twoNullEntries +- ); +- +- return KeyspaceMetadata.create(keyspace, +- KeyspaceParams.simple(1), +- Tables.none(), +- Views.none(), +- Types.of(udt1, udt2), +- Functions.of(udf1, udf2, udf3, uda1, uda2)); +- } +- +- /* +- * Serializing keyspaces +- */ +- +- private static void legacySerializeKeyspace(KeyspaceMetadata keyspace) +- { +- makeLegacyCreateKeyspaceMutation(keyspace, TIMESTAMP).apply(); +- setLegacyIndexStatus(keyspace); +- } +- +- private static DecoratedKey decorate(CFMetaData metadata, Object value) +- { +- return metadata.decorateKey(((AbstractType)metadata.getKeyValidator()).decompose(value)); +- } +- +- private static Mutation makeLegacyCreateKeyspaceMutation(KeyspaceMetadata keyspace, long timestamp) +- { +- Mutation.SimpleBuilder builder = Mutation.simpleBuilder(SystemKeyspace.NAME, decorate(SystemKeyspace.LegacyKeyspaces, keyspace.name)) +- .timestamp(timestamp); +- +- 
builder.update(SystemKeyspace.LegacyKeyspaces) +- .row() +- .add("durable_writes", keyspace.params.durableWrites) +- .add("strategy_class", keyspace.params.replication.klass.getName()) +- .add("strategy_options", json(keyspace.params.replication.options)); +- +- keyspace.tables.forEach(table -> addTableToSchemaMutation(table, true, builder)); +- keyspace.types.forEach(type -> addTypeToSchemaMutation(type, builder)); +- keyspace.functions.udfs().forEach(udf -> addFunctionToSchemaMutation(udf, builder)); +- keyspace.functions.udas().forEach(uda -> addAggregateToSchemaMutation(uda, builder)); +- +- return builder.build(); +- } +- +- /* +- * Serializing tables +- */ +- +- private static void addTableToSchemaMutation(CFMetaData table, boolean withColumnsAndTriggers, Mutation.SimpleBuilder builder) +- { +- // For property that can be null (and can be changed), we insert tombstones, to make sure +- // we don't keep a property the user has removed +- Row.SimpleBuilder adder = builder.update(SystemKeyspace.LegacyColumnfamilies) +- .row(table.cfName); +- +- adder.add("cf_id", table.cfId) +- .add("type", table.isSuper() ? 
"Super" : "Standard"); +- +- if (table.isSuper()) +- { +- adder.add("comparator", table.comparator.subtype(0).toString()) +- .add("subcomparator", ((MapType)table.compactValueColumn().type).getKeysType().toString()); +- } +- else +- { +- adder.add("comparator", LegacyLayout.makeLegacyComparator(table).toString()); +- } +- +- adder.add("bloom_filter_fp_chance", table.params.bloomFilterFpChance) +- .add("caching", cachingToString(table.params.caching)) +- .add("comment", table.params.comment) +- .add("compaction_strategy_class", table.params.compaction.klass().getName()) +- .add("compaction_strategy_options", json(table.params.compaction.options())) +- .add("compression_parameters", json(ThriftConversion.compressionParametersToThrift(table.params.compression))) +- .add("default_time_to_live", table.params.defaultTimeToLive) +- .add("gc_grace_seconds", table.params.gcGraceSeconds) +- .add("key_validator", table.getKeyValidator().toString()) +- .add("local_read_repair_chance", table.params.dcLocalReadRepairChance) +- .add("max_compaction_threshold", table.params.compaction.maxCompactionThreshold()) +- .add("max_index_interval", table.params.maxIndexInterval) +- .add("memtable_flush_period_in_ms", table.params.memtableFlushPeriodInMs) +- .add("min_compaction_threshold", table.params.compaction.minCompactionThreshold()) +- .add("min_index_interval", table.params.minIndexInterval) +- .add("read_repair_chance", table.params.readRepairChance) +- .add("speculative_retry", table.params.speculativeRetry.toString()); +- +- Map dropped = new HashMap<>(); +- for (Map.Entry entry : table.getDroppedColumns().entrySet()) +- { +- String name = UTF8Type.instance.getString(entry.getKey()); +- CFMetaData.DroppedColumn column = entry.getValue(); +- dropped.put(name, column.droppedTime); +- } +- adder.add("dropped_columns", dropped); +- +- adder.add("is_dense", table.isDense()); +- +- adder.add("default_validator", table.makeLegacyDefaultValidator().toString()); +- +- if 
(withColumnsAndTriggers) +- { +- for (ColumnDefinition column : table.allColumns()) +- addColumnToSchemaMutation(table, column, builder); +- +- for (TriggerMetadata trigger : table.getTriggers()) +- addTriggerToSchemaMutation(table, trigger, builder); +- } +- } +- +- private static String cachingToString(CachingParams caching) +- { +- return format("{\"keys\":\"%s\", \"rows_per_partition\":\"%s\"}", +- caching.keysAsString(), +- caching.rowsPerPartitionAsString()); +- } +- +- private static void addColumnToSchemaMutation(CFMetaData table, ColumnDefinition column, Mutation.SimpleBuilder builder) +- { +- // We need to special case pk-only dense tables. See CASSANDRA-9874. +- String name = table.isDense() && column.kind == ColumnDefinition.Kind.REGULAR && column.type instanceof EmptyType +- ? "" +- : column.name.toString(); +- +- final Row.SimpleBuilder adder = builder.update(SystemKeyspace.LegacyColumns).row(table.cfName, name); +- +- adder.add("validator", column.type.toString()) +- .add("type", serializeKind(column.kind, table.isDense())) +- .add("component_index", column.position()); +- +- Optional index = findIndexForColumn(table.getIndexes(), table, column); +- if (index.isPresent()) +- { +- IndexMetadata i = index.get(); +- adder.add("index_name", i.name); +- adder.add("index_type", i.kind.toString()); +- adder.add("index_options", json(i.options)); +- } +- else +- { +- adder.add("index_name", null); +- adder.add("index_type", null); +- adder.add("index_options", null); +- } +- } +- +- private static Optional findIndexForColumn(Indexes indexes, +- CFMetaData table, +- ColumnDefinition column) +- { +- // makes the assumptions that the string option denoting the +- // index targets can be parsed by CassandraIndex.parseTarget +- // which should be true for any pre-3.0 index +- for (IndexMetadata index : indexes) +- if (TargetParser.parse(table, index).left.equals(column)) +- return Optional.of(index); +- +- return Optional.empty(); +- } +- +- private static String 
serializeKind(ColumnDefinition.Kind kind, boolean isDense) +- { +- // For backward compatibility, we special case CLUSTERING and the case where the table is dense. +- if (kind == ColumnDefinition.Kind.CLUSTERING) +- return "clustering_key"; +- +- if (kind == ColumnDefinition.Kind.REGULAR && isDense) +- return "compact_value"; +- +- return kind.toString().toLowerCase(); +- } +- +- private static void addTriggerToSchemaMutation(CFMetaData table, TriggerMetadata trigger, Mutation.SimpleBuilder builder) +- { +- builder.update(SystemKeyspace.LegacyTriggers) +- .row(table.cfName, trigger.name) +- .add("trigger_options", Collections.singletonMap("class", trigger.classOption)); +- } +- +- /* +- * Serializing types +- */ +- +- private static void addTypeToSchemaMutation(UserType type, Mutation.SimpleBuilder builder) +- { +- Row.SimpleBuilder adder = builder.update(SystemKeyspace.LegacyUsertypes) +- .row(type.getNameAsString()); +- +- List names = new ArrayList<>(); +- List types = new ArrayList<>(); +- for (int i = 0; i < type.size(); i++) +- { +- names.add(type.fieldName(i).toString()); +- types.add(type.fieldType(i).toString()); +- } +- +- adder.add("field_names", names) +- .add("field_types", types); +- } +- +- /* +- * Serializing functions +- */ +- +- private static void addFunctionToSchemaMutation(UDFunction function, Mutation.SimpleBuilder builder) +- { +- Row.SimpleBuilder adder = builder.update(SystemKeyspace.LegacyFunctions) +- .row(function.name().name, functionSignatureWithTypes(function)); +- +- adder.add("body", function.body()) +- .add("language", function.language()) +- .add("return_type", function.returnType().toString()) +- .add("called_on_null_input", function.isCalledOnNullInput()); +- +- List names = new ArrayList<>(); +- List types = new ArrayList<>(); +- for (int i = 0; i < function.argNames().size(); i++) +- { +- names.add(function.argNames().get(i).bytes); +- types.add(function.argTypes().get(i).toString()); +- } +- adder.add("argument_names", names) 
+- .add("argument_types", types); +- } +- +- /* +- * Serializing aggregates +- */ +- +- private static void addAggregateToSchemaMutation(UDAggregate aggregate, Mutation.SimpleBuilder builder) +- { +- Row.SimpleBuilder adder = builder.update(SystemKeyspace.LegacyAggregates) +- .row(aggregate.name().name, functionSignatureWithTypes(aggregate)); +- +- adder.add("return_type", aggregate.returnType().toString()) +- .add("state_func", aggregate.stateFunction().name().name); +- +- if (aggregate.stateType() != null) +- adder.add("state_type", aggregate.stateType().toString()); +- if (aggregate.finalFunction() != null) +- adder.add("final_func", aggregate.finalFunction().name().name); +- if (aggregate.initialCondition() != null) +- adder.add("initcond", aggregate.initialCondition()); +- +- List types = new ArrayList<>(); +- for (AbstractType argType : aggregate.argTypes()) +- types.add(argType.toString()); +- +- adder.add("argument_types", types); +- } +- +- // We allow method overloads, so a function is not uniquely identified by its name only, but +- // also by its argument types. To distinguish overloads of given function name in the schema +- // we use a "signature" which is just a list of it's CQL argument types. 
+- public static ByteBuffer functionSignatureWithTypes(AbstractFunction fun) +- { +- List arguments = +- fun.argTypes() +- .stream() +- .map(argType -> argType.asCQL3Type().toString()) +- .collect(Collectors.toList()); +- +- return ListType.getInstance(UTF8Type.instance, false).decompose(arguments); +- } +- +- private static void setLegacyIndexStatus(KeyspaceMetadata keyspace) +- { +- keyspace.tables.forEach(LegacySchemaMigratorTest::setLegacyIndexStatus); +- } +- +- private static void setLegacyIndexStatus(CFMetaData table) +- { +- table.getIndexes().forEach((index) -> setLegacyIndexStatus(table.ksName, table.cfName, index)); +- } +- +- private static void setLegacyIndexStatus(String keyspace, String table, IndexMetadata index) +- { +- SystemKeyspace.setIndexBuilt(keyspace, table + '.' + index.name); +- } +- +- private static void verifyIndexBuildStatus(KeyspaceMetadata keyspace) +- { +- keyspace.tables.forEach(LegacySchemaMigratorTest::verifyIndexBuildStatus); +- } +- +- private static void verifyIndexBuildStatus(CFMetaData table) +- { +- table.getIndexes().forEach(index -> verifyIndexBuildStatus(table.ksName, table.cfName, index)); +- } +- +- private static void verifyIndexBuildStatus(String keyspace, String table, IndexMetadata index) +- { +- assertFalse(SystemKeyspace.isIndexBuilt(keyspace, table + '.' 
+ index.name)); +- assertTrue(SystemKeyspace.isIndexBuilt(keyspace, index.name)); +- } +- +-} +diff --git a/test/unit/org/apache/cassandra/schema/SchemaKeyspaceTest.java b/test/unit/org/apache/cassandra/schema/SchemaKeyspaceTest.java +index d686fdb..a7ae748 100644 +--- a/test/unit/org/apache/cassandra/schema/SchemaKeyspaceTest.java ++++ b/test/unit/org/apache/cassandra/schema/SchemaKeyspaceTest.java +@@ -45,10 +45,6 @@ import org.apache.cassandra.db.marshal.UTF8Type; + import org.apache.cassandra.db.partitions.PartitionUpdate; + import org.apache.cassandra.db.rows.UnfilteredRowIterators; + import org.apache.cassandra.exceptions.ConfigurationException; +-import org.apache.cassandra.thrift.CfDef; +-import org.apache.cassandra.thrift.ColumnDef; +-import org.apache.cassandra.thrift.IndexType; +-import org.apache.cassandra.thrift.ThriftConversion; + import org.apache.cassandra.utils.ByteBufferUtil; + import org.apache.cassandra.utils.FBUtilities; + +@@ -60,19 +56,6 @@ public class SchemaKeyspaceTest + private static final String KEYSPACE1 = "CFMetaDataTest1"; + private static final String CF_STANDARD1 = "Standard1"; + +- private static final List columnDefs = new ArrayList<>(); +- +- static +- { +- columnDefs.add(new ColumnDef(ByteBufferUtil.bytes("col1"), AsciiType.class.getCanonicalName()) +- .setIndex_name("col1Index") +- .setIndex_type(IndexType.KEYS)); +- +- columnDefs.add(new ColumnDef(ByteBufferUtil.bytes("col2"), UTF8Type.class.getCanonicalName()) +- .setIndex_name("col2Index") +- .setIndex_type(IndexType.KEYS)); +- } +- + @BeforeClass + public static void defineSchema() throws ConfigurationException + { +@@ -83,43 +66,6 @@ public class SchemaKeyspaceTest + } + + @Test +- public void testThriftConversion() throws Exception +- { +- CfDef cfDef = new CfDef().setDefault_validation_class(AsciiType.class.getCanonicalName()) +- .setComment("Test comment") +- .setColumn_metadata(columnDefs) +- .setKeyspace(KEYSPACE1) +- .setName(CF_STANDARD1); +- +- // convert Thrift 
to CFMetaData +- CFMetaData cfMetaData = ThriftConversion.fromThrift(cfDef); +- +- CfDef thriftCfDef = new CfDef(); +- thriftCfDef.keyspace = KEYSPACE1; +- thriftCfDef.name = CF_STANDARD1; +- thriftCfDef.default_validation_class = cfDef.default_validation_class; +- thriftCfDef.comment = cfDef.comment; +- thriftCfDef.column_metadata = new ArrayList<>(); +- for (ColumnDef columnDef : columnDefs) +- { +- ColumnDef c = new ColumnDef(); +- c.name = ByteBufferUtil.clone(columnDef.name); +- c.validation_class = columnDef.getValidation_class(); +- c.index_name = columnDef.getIndex_name(); +- c.index_type = IndexType.KEYS; +- thriftCfDef.column_metadata.add(c); +- } +- +- CfDef converted = ThriftConversion.toThrift(cfMetaData); +- +- assertEquals(thriftCfDef.keyspace, converted.keyspace); +- assertEquals(thriftCfDef.name, converted.name); +- assertEquals(thriftCfDef.default_validation_class, converted.default_validation_class); +- assertEquals(thriftCfDef.comment, converted.comment); +- assertEquals(new HashSet<>(thriftCfDef.column_metadata), new HashSet<>(converted.column_metadata)); +- } +- +- @Test + public void testConversionsInverses() throws Exception + { + for (String keyspaceName : Schema.instance.getNonSystemKeyspaces()) +@@ -127,9 +73,6 @@ public class SchemaKeyspaceTest + for (ColumnFamilyStore cfs : Keyspace.open(keyspaceName).getColumnFamilyStores()) + { + CFMetaData cfm = cfs.metadata; +- if (!cfm.isThriftCompatible()) +- continue; +- + checkInverses(cfm); + + // Testing with compression to catch #3558 +@@ -181,11 +124,6 @@ public class SchemaKeyspaceTest + { + KeyspaceMetadata keyspace = Schema.instance.getKSMetaData(cfm.ksName); + +- // Test thrift conversion +- CFMetaData before = cfm; +- CFMetaData after = ThriftConversion.fromThriftForUpdate(ThriftConversion.toThrift(before), before); +- assert before.equals(after) : String.format("%n%s%n!=%n%s", before, after); +- + // Test schema conversion + Mutation rm = 
SchemaKeyspace.makeCreateTableMutation(keyspace, cfm, FBUtilities.timestampMicros()).build(); + PartitionUpdate serializedCf = rm.getPartitionUpdate(Schema.instance.getId(SchemaKeyspace.NAME, SchemaKeyspace.TABLES)); +diff --git a/test/unit/org/apache/cassandra/service/DataResolverTest.java b/test/unit/org/apache/cassandra/service/DataResolverTest.java +index 3fee5f9..db9f6fe 100644 +--- a/test/unit/org/apache/cassandra/service/DataResolverTest.java ++++ b/test/unit/org/apache/cassandra/service/DataResolverTest.java +@@ -328,7 +328,7 @@ public class DataResolverTest + .add("c2", "v2") + .buildUpdate()))); + InetAddress peer2 = peer(); +- resolver.preprocess(readResponseMessage(peer2, EmptyIterators.unfilteredPartition(cfm, false))); ++ resolver.preprocess(readResponseMessage(peer2, EmptyIterators.unfilteredPartition(cfm))); + + try(PartitionIterator data = resolver.resolve(); + RowIterator rows = Iterators.getOnlyElement(data)) +@@ -350,8 +350,8 @@ public class DataResolverTest + public void testResolveWithBothEmpty() + { + DataResolver resolver = new DataResolver(ks, command, ConsistencyLevel.ALL, 2); +- resolver.preprocess(readResponseMessage(peer(), EmptyIterators.unfilteredPartition(cfm, false))); +- resolver.preprocess(readResponseMessage(peer(), EmptyIterators.unfilteredPartition(cfm, false))); ++ resolver.preprocess(readResponseMessage(peer(), EmptyIterators.unfilteredPartition(cfm))); ++ resolver.preprocess(readResponseMessage(peer(), EmptyIterators.unfilteredPartition(cfm))); + + try(PartitionIterator data = resolver.resolve()) + { +@@ -847,7 +847,7 @@ public class DataResolverTest + + private UnfilteredPartitionIterator fullPartitionDelete(CFMetaData cfm, DecoratedKey dk, long timestamp, int nowInSec) + { +- return new SingletonUnfilteredPartitionIterator(PartitionUpdate.fullPartitionDelete(cfm, dk, timestamp, nowInSec).unfilteredIterator(), false); ++ return new SingletonUnfilteredPartitionIterator(PartitionUpdate.fullPartitionDelete(cfm, dk, timestamp, 
nowInSec).unfilteredIterator()); + } + + private static class MessageRecorder implements IMessageSink +@@ -867,6 +867,6 @@ public class DataResolverTest + + private UnfilteredPartitionIterator iter(PartitionUpdate update) + { +- return new SingletonUnfilteredPartitionIterator(update.unfilteredIterator(), false); ++ return new SingletonUnfilteredPartitionIterator(update.unfilteredIterator()); + } + } +diff --git a/test/unit/org/apache/cassandra/service/EmbeddedCassandraServiceTest.java b/test/unit/org/apache/cassandra/service/EmbeddedCassandraServiceTest.java +deleted file mode 100644 +index b89f01d..0000000 +--- a/test/unit/org/apache/cassandra/service/EmbeddedCassandraServiceTest.java ++++ /dev/null +@@ -1,129 +0,0 @@ +-/* +-* Licensed to the Apache Software Foundation (ASF) under one +-* or more contributor license agreements. See the NOTICE file +-* distributed with this work for additional information +-* regarding copyright ownership. The ASF licenses this file +-* to you under the Apache License, Version 2.0 (the +-* "License"); you may not use this file except in compliance +-* with the License. You may obtain a copy of the License at +-* +-* http://www.apache.org/licenses/LICENSE-2.0 +-* +-* Unless required by applicable law or agreed to in writing, +-* software distributed under the License is distributed on an +-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-* KIND, either express or implied. See the License for the +-* specific language governing permissions and limitations +-* under the License. 
+-*/ +-package org.apache.cassandra.service; +- +-import java.io.IOException; +-import java.nio.ByteBuffer; +-import java.nio.charset.CharacterCodingException; +- +-import org.junit.BeforeClass; +-import org.junit.Test; +- +-import org.apache.cassandra.SchemaLoader; +-import org.apache.cassandra.config.CFMetaData; +-import org.apache.cassandra.config.DatabaseDescriptor; +-import org.apache.cassandra.db.marshal.AsciiType; +-import org.apache.cassandra.schema.KeyspaceParams; +-import org.apache.cassandra.thrift.*; +-import org.apache.cassandra.utils.ByteBufferUtil; +-import org.apache.thrift.TException; +-import org.apache.thrift.protocol.TBinaryProtocol; +-import org.apache.thrift.protocol.TProtocol; +-import org.apache.thrift.transport.TFramedTransport; +-import org.apache.thrift.transport.TSocket; +-import org.apache.thrift.transport.TTransport; +-import org.apache.thrift.transport.TTransportException; +- +-import static org.junit.Assert.assertEquals; +-import static org.junit.Assert.assertNotNull; +- +-/** +- * Example how to use an embedded cassandra service. +- * +- * Tests connect to localhost:9160 when the embedded server is running. +- * +- */ +-public class EmbeddedCassandraServiceTest +-{ +- +- private static EmbeddedCassandraService cassandra; +- private static final String KEYSPACE1 = "EmbeddedCassandraServiceTest"; +- private static final String CF_STANDARD = "Standard1"; +- +- @BeforeClass +- public static void defineSchema() throws Exception +- { +- SchemaLoader.prepareServer(); +- setup(); +- SchemaLoader.createKeyspace(KEYSPACE1, +- KeyspaceParams.simple(1), +- CFMetaData.Builder.create(KEYSPACE1, CF_STANDARD, true, false, false) +- .addPartitionKey("pk", AsciiType.instance) +- .addClusteringColumn("ck", AsciiType.instance) +- .addRegularColumn("val", AsciiType.instance) +- .build()); +- } +- +- /** +- * Set embedded cassandra up and spawn it in a new thread. 
+- * +- * @throws TTransportException +- * @throws IOException +- * @throws InterruptedException +- */ +- public static void setup() throws TTransportException, IOException, InterruptedException +- { +- // unique ks / cfs mean no need to clear the schema +- cassandra = new EmbeddedCassandraService(); +- cassandra.start(); +- } +- +- @Test +- public void testEmbeddedCassandraService() +- throws AuthenticationException, AuthorizationException, InvalidRequestException, UnavailableException, TimedOutException, TException, NotFoundException, CharacterCodingException +- { +- Cassandra.Client client = getClient(); +- client.set_keyspace(KEYSPACE1); +- +- ByteBuffer key_user_id = ByteBufferUtil.bytes("1"); +- +- long timestamp = System.currentTimeMillis(); +- ColumnPath cp = new ColumnPath("Standard1"); +- ColumnParent par = new ColumnParent("Standard1"); +- cp.column = ByteBufferUtil.bytes("name"); +- +- // insert +- client.insert(key_user_id, +- par, +- new Column(ByteBufferUtil.bytes("name")).setValue(ByteBufferUtil.bytes("Ran")).setTimestamp(timestamp), +- ConsistencyLevel.ONE); +- +- // read +- ColumnOrSuperColumn got = client.get(key_user_id, cp, ConsistencyLevel.ONE); +- +- // assert +- assertNotNull("Got a null ColumnOrSuperColumn", got); +- assertEquals("Ran", ByteBufferUtil.string(got.getColumn().value)); +- } +- +- /** +- * Gets a connection to the localhost client +- * +- * @return +- * @throws TTransportException +- */ +- private Cassandra.Client getClient() throws TTransportException +- { +- TTransport tr = new TFramedTransport(new TSocket("localhost", DatabaseDescriptor.getRpcPort())); +- TProtocol proto = new TBinaryProtocol(tr); +- Cassandra.Client client = new Cassandra.Client(proto); +- tr.open(); +- return client; +- } +-} +diff --git a/test/unit/org/apache/cassandra/transport/MessagePayloadTest.java b/test/unit/org/apache/cassandra/transport/MessagePayloadTest.java +index 865a173..931d4d6 100644 +--- 
a/test/unit/org/apache/cassandra/transport/MessagePayloadTest.java ++++ b/test/unit/org/apache/cassandra/transport/MessagePayloadTest.java +@@ -294,11 +294,6 @@ public class MessagePayloadTest extends CQLTester + return QueryProcessor.instance.getPrepared(id); + } + +- public ParsedStatement.Prepared getPreparedForThrift(Integer id) +- { +- return QueryProcessor.instance.getPreparedForThrift(id); +- } +- + public ResultMessage.Prepared prepare(String query, + QueryState state, + Map customPayload) +diff --git a/test/unit/org/apache/cassandra/triggers/TriggersTest.java b/test/unit/org/apache/cassandra/triggers/TriggersTest.java +index e5a2dd6..37638c9 100644 +--- a/test/unit/org/apache/cassandra/triggers/TriggersTest.java ++++ b/test/unit/org/apache/cassandra/triggers/TriggersTest.java +@@ -37,9 +37,7 @@ import org.apache.cassandra.db.partitions.Partition; + import org.apache.cassandra.exceptions.ConfigurationException; + import org.apache.cassandra.exceptions.RequestExecutionException; + import org.apache.cassandra.service.StorageService; +-import org.apache.cassandra.thrift.*; + import org.apache.cassandra.utils.FBUtilities; +-import org.apache.thrift.protocol.TBinaryProtocol; + + import static org.apache.cassandra.utils.ByteBufferUtil.bytes; + import static org.apache.cassandra.utils.ByteBufferUtil.toInt; +@@ -49,7 +47,6 @@ import static org.junit.Assert.assertTrue; + public class TriggersTest + { + private static boolean triggerCreated = false; +- private static ThriftServer thriftServer; + + private static String ksName = "triggers_test_ks"; + private static String cfName = "test_table"; +@@ -65,11 +62,6 @@ public class TriggersTest + public void setup() throws Exception + { + StorageService.instance.initServer(0); +- if (thriftServer == null || ! 
thriftServer.isRunning()) +- { +- thriftServer = new ThriftServer(InetAddress.getLocalHost(), 9170, 50); +- thriftServer.start(); +- } + + String cql = String.format("CREATE KEYSPACE IF NOT EXISTS %s " + + "WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1}", +@@ -92,15 +84,6 @@ public class TriggersTest + } + } + +- @AfterClass +- public static void teardown() +- { +- if (thriftServer != null && thriftServer.isRunning()) +- { +- thriftServer.stop(); +- } +- } +- + @Test + public void executeTriggerOnCqlInsert() throws Exception + { +@@ -121,43 +104,6 @@ public class TriggersTest + } + + @Test +- public void executeTriggerOnThriftInsert() throws Exception +- { +- Cassandra.Client client = new Cassandra.Client( +- new TBinaryProtocol( +- new TFramedTransportFactory().openTransport( +- InetAddress.getLocalHost().getHostName(), 9170))); +- client.set_keyspace(ksName); +- client.insert(bytes(2), +- new ColumnParent(cfName), +- getColumnForInsert("v1", 2), +- org.apache.cassandra.thrift.ConsistencyLevel.ONE); +- +- assertUpdateIsAugmented(2); +- } +- +- @Test +- public void executeTriggerOnThriftBatchUpdate() throws Exception +- { +- Cassandra.Client client = new Cassandra.Client( +- new TBinaryProtocol( +- new TFramedTransportFactory().openTransport( +- InetAddress.getLocalHost().getHostName(), 9170))); +- client.set_keyspace(ksName); +- org.apache.cassandra.thrift.Mutation mutation = new org.apache.cassandra.thrift.Mutation(); +- ColumnOrSuperColumn cosc = new ColumnOrSuperColumn(); +- cosc.setColumn(getColumnForInsert("v1", 3)); +- mutation.setColumn_or_supercolumn(cosc); +- client.batch_mutate( +- Collections.singletonMap(bytes(3), +- Collections.singletonMap(cfName, +- Collections.singletonList(mutation))), +- org.apache.cassandra.thrift.ConsistencyLevel.ONE); +- +- assertUpdateIsAugmented(3); +- } +- +- @Test + public void executeTriggerOnCqlInsertWithConditions() throws Exception + { + String cql = String.format("INSERT INTO %s.%s (k, v1) 
VALUES (4, 4) IF NOT EXISTS", ksName, cfName); +@@ -177,24 +123,6 @@ public class TriggersTest + assertUpdateIsAugmented(5); + } + +- @Test +- public void executeTriggerOnThriftCASOperation() throws Exception +- { +- Cassandra.Client client = new Cassandra.Client( +- new TBinaryProtocol( +- new TFramedTransportFactory().openTransport( +- InetAddress.getLocalHost().getHostName(), 9170))); +- client.set_keyspace(ksName); +- client.cas(bytes(6), +- cfName, +- Collections.emptyList(), +- Collections.singletonList(getColumnForInsert("v1", 6)), +- org.apache.cassandra.thrift.ConsistencyLevel.LOCAL_SERIAL, +- org.apache.cassandra.thrift.ConsistencyLevel.ONE); +- +- assertUpdateIsAugmented(6); +- } +- + @Test(expected=org.apache.cassandra.exceptions.InvalidRequestException.class) + public void onCqlUpdateWithConditionsRejectGeneratedUpdatesForDifferentPartition() throws Exception + { +@@ -227,56 +155,6 @@ public class TriggersTest + } + } + +- @Test(expected=InvalidRequestException.class) +- public void onThriftCASRejectGeneratedUpdatesForDifferentPartition() throws Exception +- { +- String cf = "cf" + System.nanoTime(); +- try +- { +- setupTableWithTrigger(cf, CrossPartitionTrigger.class); +- Cassandra.Client client = new Cassandra.Client( +- new TBinaryProtocol( +- new TFramedTransportFactory().openTransport( +- InetAddress.getLocalHost().getHostName(), 9170))); +- client.set_keyspace(ksName); +- client.cas(bytes(9), +- cf, +- Collections.emptyList(), +- Collections.singletonList(getColumnForInsert("v1", 9)), +- org.apache.cassandra.thrift.ConsistencyLevel.LOCAL_SERIAL, +- org.apache.cassandra.thrift.ConsistencyLevel.ONE); +- } +- finally +- { +- assertUpdateNotExecuted(cf, 9); +- } +- } +- +- @Test(expected=InvalidRequestException.class) +- public void onThriftCASRejectGeneratedUpdatesForDifferentCF() throws Exception +- { +- String cf = "cf" + System.nanoTime(); +- try +- { +- setupTableWithTrigger(cf, CrossTableTrigger.class); +- Cassandra.Client client = new 
Cassandra.Client( +- new TBinaryProtocol( +- new TFramedTransportFactory().openTransport( +- InetAddress.getLocalHost().getHostName(), 9170))); +- client.set_keyspace(ksName); +- client.cas(bytes(10), +- cf, +- Collections.emptyList(), +- Collections.singletonList(getColumnForInsert("v1", 10)), +- org.apache.cassandra.thrift.ConsistencyLevel.LOCAL_SERIAL, +- org.apache.cassandra.thrift.ConsistencyLevel.ONE); +- } +- finally +- { +- assertUpdateNotExecuted(cf, 10); +- } +- } +- + @Test(expected=org.apache.cassandra.exceptions.InvalidRequestException.class) + public void ifTriggerThrowsErrorNoMutationsAreApplied() throws Exception + { +@@ -325,15 +203,6 @@ public class TriggersTest + assertTrue(rs.isEmpty()); + } + +- private org.apache.cassandra.thrift.Column getColumnForInsert(String columnName, int value) +- { +- org.apache.cassandra.thrift.Column column = new org.apache.cassandra.thrift.Column(); +- column.setName(LegacyLayout.makeLegacyComparator(Schema.instance.getCFMetaData(ksName, cfName)).fromString(columnName)); +- column.setValue(bytes(value)); +- column.setTimestamp(System.currentTimeMillis()); +- return column; +- } +- + public static class TestTrigger implements ITrigger + { + public Collection augment(Partition partition) +diff --git a/tools/bin/cassandra.in.bat b/tools/bin/cassandra.in.bat +index 0fdd31a..8804921 100644 +--- a/tools/bin/cassandra.in.bat ++++ b/tools/bin/cassandra.in.bat +@@ -39,7 +39,7 @@ goto :eof + :okClasspath + + REM Include the build\classes\main directory so it works in development +-set CASSANDRA_CLASSPATH=%CLASSPATH%;%CASSANDRA_CONF%;"%CASSANDRA_HOME%\build\classes\main";"%CASSANDRA_HOME%\build\classes\thrift";"%CASSANDRA_HOME%\build\classes\stress" ++set CASSANDRA_CLASSPATH=%CLASSPATH%;%CASSANDRA_CONF%;"%CASSANDRA_HOME%\build\classes\main";"%CASSANDRA_HOME%\build\classes\stress" + + REM Add the default storage location. 
Can be overridden in conf\cassandra.yaml + set CASSANDRA_PARAMS=%CASSANDRA_PARAMS% "-Dcassandra.storagedir=%CASSANDRA_HOME%\data" +diff --git a/tools/bin/cassandra.in.sh b/tools/bin/cassandra.in.sh +index 004f394..2f0d1ec 100644 +--- a/tools/bin/cassandra.in.sh ++++ b/tools/bin/cassandra.in.sh +@@ -28,7 +28,6 @@ fi + # it's just used here in constructing the classpath. + cassandra_bin="$CASSANDRA_HOME/build/classes/main" + cassandra_bin="$cassandra_bin:$CASSANDRA_HOME/build/classes/stress" +-cassandra_bin="$cassandra_bin:$CASSANDRA_HOME/build/classes/thrift" + #cassandra_bin="$cassandra_home/build/cassandra.jar" + + # the default location for commitlogs, sstables, and saved caches +diff --git a/tools/stress/README.txt b/tools/stress/README.txt +index aa89dab..585409e 100644 +--- a/tools/stress/README.txt ++++ b/tools/stress/README.txt +@@ -55,7 +55,7 @@ Primary Options: + -rate: + Thread count, rate limit or automatic mode (default is auto) + -mode: +- Thrift or CQL with options ++ CQL transport options + -errors: + How to handle errors when encountered during stress + -sample: +diff --git a/tools/stress/src/org/apache/cassandra/stress/Operation.java b/tools/stress/src/org/apache/cassandra/stress/Operation.java +index 16f6f04..35ab8fc 100644 +--- a/tools/stress/src/org/apache/cassandra/stress/Operation.java ++++ b/tools/stress/src/org/apache/cassandra/stress/Operation.java +@@ -25,9 +25,7 @@ import com.google.common.util.concurrent.RateLimiter; + import org.apache.cassandra.stress.settings.SettingsLog; + import org.apache.cassandra.stress.settings.StressSettings; + import org.apache.cassandra.stress.util.JavaDriverClient; +-import org.apache.cassandra.stress.util.ThriftClient; + import org.apache.cassandra.stress.util.Timer; +-import org.apache.cassandra.thrift.InvalidRequestException; + import org.apache.cassandra.transport.SimpleClient; + + public abstract class Operation +@@ -55,13 +53,6 @@ public abstract class Operation + return false; + } + +- /** +- * Run 
operation +- * @param client Cassandra Thrift client connection +- * @throws IOException on any I/O error. +- */ +- public abstract void run(ThriftClient client) throws IOException; +- + public void run(SimpleClient client) throws IOException + { + throw new UnsupportedOperationException(); +@@ -128,7 +119,7 @@ public abstract class Operation + protected String getExceptionMessage(Exception e) + { + String className = e.getClass().getSimpleName(); +- String message = (e instanceof InvalidRequestException) ? ((InvalidRequestException) e).getWhy() : e.getMessage(); ++ String message = e.getMessage(); + return (message == null) ? "(" + className + ")" : String.format("(%s): %s", className, message); + } + +diff --git a/tools/stress/src/org/apache/cassandra/stress/StressAction.java b/tools/stress/src/org/apache/cassandra/stress/StressAction.java +index 7c37ef8..6d8ff40 100644 +--- a/tools/stress/src/org/apache/cassandra/stress/StressAction.java ++++ b/tools/stress/src/org/apache/cassandra/stress/StressAction.java +@@ -33,7 +33,6 @@ import org.apache.cassandra.stress.settings.ConnectionAPI; + import org.apache.cassandra.stress.settings.SettingsCommand; + import org.apache.cassandra.stress.settings.StressSettings; + import org.apache.cassandra.stress.util.JavaDriverClient; +-import org.apache.cassandra.stress.util.ThriftClient; + import org.apache.cassandra.transport.SimpleClient; + + import com.google.common.util.concurrent.Uninterruptibles; +@@ -393,7 +392,6 @@ public class StressAction implements Runnable + try + { + SimpleClient sclient = null; +- ThriftClient tclient = null; + JavaDriverClient jclient = null; + + +@@ -406,10 +404,6 @@ public class StressAction implements Runnable + case SIMPLE_NATIVE: + sclient = settings.getSimpleNativeClient(); + break; +- case THRIFT: +- case THRIFT_SMART: +- tclient = settings.getThriftClient(); +- break; + default: + throw new IllegalStateException(); + } +@@ -436,10 +430,8 @@ public class StressAction implements Runnable + 
case SIMPLE_NATIVE: + op.run(sclient); + break; +- case THRIFT: +- case THRIFT_SMART: + default: +- op.run(tclient); ++ throw new IllegalStateException(); + } + } + catch (Exception e) +diff --git a/tools/stress/src/org/apache/cassandra/stress/StressProfile.java b/tools/stress/src/org/apache/cassandra/stress/StressProfile.java +index 8b59bda..42892cd 100644 +--- a/tools/stress/src/org/apache/cassandra/stress/StressProfile.java ++++ b/tools/stress/src/org/apache/cassandra/stress/StressProfile.java +@@ -48,11 +48,7 @@ import org.apache.cassandra.stress.operations.userdefined.SchemaQuery; + import org.apache.cassandra.stress.operations.userdefined.ValidatingSchemaQuery; + import org.apache.cassandra.stress.settings.*; + import org.apache.cassandra.stress.util.JavaDriverClient; +-import org.apache.cassandra.stress.util.ThriftClient; + import org.apache.cassandra.stress.util.Timer; +-import org.apache.cassandra.thrift.Compression; +-import org.apache.cassandra.thrift.ThriftConversion; +-import org.apache.thrift.TException; + import org.yaml.snakeyaml.Yaml; + import org.yaml.snakeyaml.constructor.Constructor; + import org.yaml.snakeyaml.error.YAMLException; +@@ -81,12 +77,10 @@ public class StressProfile implements Serializable + transient volatile RatioDistributionFactory selectchance; + transient volatile RatioDistributionFactory rowPopulation; + transient volatile PreparedStatement insertStatement; +- transient volatile Integer thriftInsertId; + transient volatile List validationFactories; + + transient volatile Map argSelects; + transient volatile Map queryStatements; +- transient volatile Map thriftQueryIds; + + private static final Pattern lowercaseAlphanumeric = Pattern.compile("[a-z0-9_]+"); + +@@ -313,42 +307,25 @@ public class StressProfile implements Serializable + { + if (queryStatements == null) + { +- try +- { +- JavaDriverClient jclient = settings.getJavaDriverClient(); +- ThriftClient tclient = null; +- +- if (settings.mode.api != 
ConnectionAPI.JAVA_DRIVER_NATIVE) +- tclient = settings.getThriftClient(); +- +- Map stmts = new HashMap<>(); +- Map tids = new HashMap<>(); +- Map args = new HashMap<>(); +- for (Map.Entry e : queries.entrySet()) +- { +- stmts.put(e.getKey().toLowerCase(), jclient.prepare(e.getValue().cql)); ++ JavaDriverClient jclient = settings.getJavaDriverClient(); + +- if (tclient != null) +- tids.put(e.getKey().toLowerCase(), tclient.prepare_cql3_query(e.getValue().cql, Compression.NONE)); +- +- args.put(e.getKey().toLowerCase(), e.getValue().fields == null +- ? SchemaQuery.ArgSelect.MULTIROW +- : SchemaQuery.ArgSelect.valueOf(e.getValue().fields.toUpperCase())); +- } +- thriftQueryIds = tids; +- queryStatements = stmts; +- argSelects = args; +- } +- catch (TException e) ++ Map stmts = new HashMap<>(); ++ Map tids = new HashMap<>(); ++ Map args = new HashMap<>(); ++ for (Map.Entry e : queries.entrySet()) + { +- throw new RuntimeException(e); ++ stmts.put(e.getKey().toLowerCase(), jclient.prepare(e.getValue().cql)); ++ args.put(e.getKey().toLowerCase(), e.getValue().fields == null ++ ? 
SchemaQuery.ArgSelect.MULTIROW ++ : SchemaQuery.ArgSelect.valueOf(e.getValue().fields.toUpperCase())); + } ++ queryStatements = stmts; ++ argSelects = args; + } + } + } + +- return new SchemaQuery(timer, settings, generator, seeds, thriftQueryIds.get(name), queryStatements.get(name), +- ThriftConversion.fromThrift(settings.command.consistencyLevel), argSelects.get(name)); ++ return new SchemaQuery(timer, settings, generator, seeds, queryStatements.get(name), settings.command.consistencyLevel, argSelects.get(name)); + } + + public Operation getBulkReadQueries(String name, Timer timer, StressSettings settings, TokenRangeIterator tokenRangeIterator, boolean isWarmup) +@@ -484,24 +461,12 @@ public class StressProfile implements Serializable + JavaDriverClient client = settings.getJavaDriverClient(); + String query = sb.toString(); + +- if (settings.mode.api != ConnectionAPI.JAVA_DRIVER_NATIVE) +- { +- try +- { +- thriftInsertId = settings.getThriftClient().prepare_cql3_query(query, Compression.NONE); +- } +- catch (TException e) +- { +- throw new RuntimeException(e); +- } +- } +- + insertStatement = client.prepare(query); + } + } + } + +- return new SchemaInsert(timer, settings, generator, seedManager, partitions.get(), selectchance.get(), rowPopulation.get(), thriftInsertId, insertStatement, ThriftConversion.fromThrift(settings.command.consistencyLevel), batchType); ++ return new SchemaInsert(timer, settings, generator, seedManager, partitions.get(), selectchance.get(), rowPopulation.get(), insertStatement, settings.command.consistencyLevel, batchType); + } + + public List getValidate(Timer timer, PartitionGenerator generator, SeedManager seedManager, StressSettings settings) +@@ -520,7 +485,7 @@ public class StressProfile implements Serializable + + List queries = new ArrayList<>(); + for (ValidatingSchemaQuery.Factory factory : validationFactories) +- queries.add(factory.create(timer, settings, generator, seedManager, 
ThriftConversion.fromThrift(settings.command.consistencyLevel))); ++ queries.add(factory.create(timer, settings, generator, seedManager, settings.command.consistencyLevel)); + return queries; + } + +diff --git a/tools/stress/src/org/apache/cassandra/stress/operations/predefined/CqlOperation.java b/tools/stress/src/org/apache/cassandra/stress/operations/predefined/CqlOperation.java +index 097c1a0..4525294 100644 +--- a/tools/stress/src/org/apache/cassandra/stress/operations/predefined/CqlOperation.java ++++ b/tools/stress/src/org/apache/cassandra/stress/operations/predefined/CqlOperation.java +@@ -35,16 +35,10 @@ import org.apache.cassandra.stress.settings.Command; + import org.apache.cassandra.stress.settings.ConnectionStyle; + import org.apache.cassandra.stress.settings.StressSettings; + import org.apache.cassandra.stress.util.JavaDriverClient; +-import org.apache.cassandra.stress.util.ThriftClient; + import org.apache.cassandra.stress.util.Timer; +-import org.apache.cassandra.thrift.Compression; +-import org.apache.cassandra.thrift.CqlResult; +-import org.apache.cassandra.thrift.CqlRow; +-import org.apache.cassandra.thrift.ThriftConversion; + import org.apache.cassandra.transport.SimpleClient; + import org.apache.cassandra.transport.messages.ResultMessage; + import org.apache.cassandra.utils.ByteBufferUtil; +-import org.apache.thrift.TException; + + public abstract class CqlOperation extends PredefinedOperation + { +@@ -72,17 +66,13 @@ public abstract class CqlOperation extends PredefinedOperation + Object idobj = getCqlCache(); + if (idobj == null) + { +- try +- { +- id = client.createPreparedStatement(buildQuery()); +- } catch (TException e) +- { +- throw new RuntimeException(e); +- } ++ id = client.createPreparedStatement(buildQuery()); + storeCqlCache(id); + } + else ++ { + id = idobj; ++ } + + op = buildRunOp(client, null, id, queryParams, key); + } +@@ -244,12 +234,6 @@ public abstract class CqlOperation extends PredefinedOperation + + + @Override +- public 
void run(final ThriftClient client) throws IOException +- { +- run(wrap(client)); +- } +- +- @Override + public void run(SimpleClient client) throws IOException + { + run(wrap(client)); +@@ -261,11 +245,6 @@ public abstract class CqlOperation extends PredefinedOperation + run(wrap(client)); + } + +- public ClientWrapper wrap(ThriftClient client) +- { +- return new Cql3CassandraClientWrapper(client); +- } +- + public ClientWrapper wrap(JavaDriverClient client) + { + return new JavaDriverWrapper(client); +@@ -278,9 +257,9 @@ public abstract class CqlOperation extends PredefinedOperation + + protected interface ClientWrapper + { +- Object createPreparedStatement(String cqlQuery) throws TException; +- V execute(Object preparedStatementId, ByteBuffer key, List queryParams, ResultHandler handler) throws TException; +- V execute(String query, ByteBuffer key, List queryParams, ResultHandler handler) throws TException; ++ Object createPreparedStatement(String cqlQuery); ++ V execute(Object preparedStatementId, ByteBuffer key, List queryParams, ResultHandler handler); ++ V execute(String query, ByteBuffer key, List queryParams, ResultHandler handler); + } + + private final class JavaDriverWrapper implements ClientWrapper +@@ -295,7 +274,7 @@ public abstract class CqlOperation extends PredefinedOperation + public V execute(String query, ByteBuffer key, List queryParams, ResultHandler handler) + { + String formattedQuery = formatCqlQuery(query, queryParams); +- return handler.javaDriverHandler().apply(client.execute(formattedQuery, ThriftConversion.fromThrift(settings.command.consistencyLevel))); ++ return handler.javaDriverHandler().apply(client.execute(formattedQuery, settings.command.consistencyLevel)); + } + + @Override +@@ -305,7 +284,7 @@ public abstract class CqlOperation extends PredefinedOperation + client.executePrepared( + (PreparedStatement) preparedStatementId, + queryParams, +- ThriftConversion.fromThrift(settings.command.consistencyLevel))); ++ 
settings.command.consistencyLevel)); + } + + @Override +@@ -327,17 +306,17 @@ public abstract class CqlOperation extends PredefinedOperation + public V execute(String query, ByteBuffer key, List queryParams, ResultHandler handler) + { + String formattedQuery = formatCqlQuery(query, queryParams); +- return handler.thriftHandler().apply(client.execute(formattedQuery, ThriftConversion.fromThrift(settings.command.consistencyLevel))); ++ return handler.simpleClientHandler().apply(client.execute(formattedQuery, settings.command.consistencyLevel)); + } + + @Override + public V execute(Object preparedStatementId, ByteBuffer key, List queryParams, ResultHandler handler) + { +- return handler.thriftHandler().apply( ++ return handler.simpleClientHandler().apply( + client.executePrepared( + (byte[]) preparedStatementId, + toByteBufferParams(queryParams), +- ThriftConversion.fromThrift(settings.command.consistencyLevel))); ++ settings.command.consistencyLevel)); + } + + @Override +@@ -347,46 +326,11 @@ public abstract class CqlOperation extends PredefinedOperation + } + } + +- // client wrapper for Cql3 +- private final class Cql3CassandraClientWrapper implements ClientWrapper +- { +- final ThriftClient client; +- private Cql3CassandraClientWrapper(ThriftClient client) +- { +- this.client = client; +- } +- +- @Override +- public V execute(String query, ByteBuffer key, List queryParams, ResultHandler handler) throws TException +- { +- String formattedQuery = formatCqlQuery(query, queryParams); +- return handler.simpleNativeHandler().apply( +- client.execute_cql3_query(formattedQuery, key, Compression.NONE, settings.command.consistencyLevel) +- ); +- } +- +- @Override +- public V execute(Object preparedStatementId, ByteBuffer key, List queryParams, ResultHandler handler) throws TException +- { +- Integer id = (Integer) preparedStatementId; +- return handler.simpleNativeHandler().apply( +- client.execute_prepared_cql3_query(id, key, toByteBufferParams(queryParams), 
settings.command.consistencyLevel) +- ); +- } +- +- @Override +- public Object createPreparedStatement(String cqlQuery) throws TException +- { +- return client.prepare_cql3_query(cqlQuery, Compression.NONE); +- } +- } +- + // interface for building functions to standardise results from each client + protected static interface ResultHandler + { + Function javaDriverHandler(); +- Function thriftHandler(); +- Function simpleNativeHandler(); ++ Function simpleClientHandler(); + } + + protected static class RowCountHandler implements ResultHandler +@@ -409,7 +353,7 @@ public abstract class CqlOperation extends PredefinedOperation + } + + @Override +- public Function thriftHandler() ++ public Function simpleClientHandler() + { + return new Function() + { +@@ -420,27 +364,6 @@ public abstract class CqlOperation extends PredefinedOperation + } + }; + } +- +- @Override +- public Function simpleNativeHandler() +- { +- return new Function() +- { +- +- @Override +- public Integer apply(CqlResult result) +- { +- switch (result.getType()) +- { +- case ROWS: +- return result.getRows().size(); +- default: +- return 1; +- } +- } +- }; +- } +- + } + + // Processes results from each client into an array of all key bytes returned +@@ -475,7 +398,7 @@ public abstract class CqlOperation extends PredefinedOperation + } + + @Override +- public Function thriftHandler() ++ public Function simpleClientHandler() + { + return new Function() + { +@@ -499,29 +422,6 @@ public abstract class CqlOperation extends PredefinedOperation + } + }; + } +- +- @Override +- public Function simpleNativeHandler() +- { +- return new Function() +- { +- +- @Override +- public ByteBuffer[][] apply(CqlResult result) +- { +- ByteBuffer[][] r = new ByteBuffer[result.getRows().size()][]; +- for (int i = 0 ; i < r.length ; i++) +- { +- CqlRow row = result.getRows().get(i); +- r[i] = new ByteBuffer[row.getColumns().size()]; +- for (int j = 0 ; j < r[i].length ; j++) +- r[i][j] = 
ByteBuffer.wrap(row.getColumns().get(j).getValue()); +- } +- return r; +- } +- }; +- } +- + } + // Processes results from each client into an array of all key bytes returned + protected static final class KeysHandler implements ResultHandler +@@ -550,7 +450,7 @@ public abstract class CqlOperation extends PredefinedOperation + } + + @Override +- public Function thriftHandler() ++ public Function simpleClientHandler() + { + return new Function() + { +@@ -570,24 +470,6 @@ public abstract class CqlOperation extends PredefinedOperation + } + }; + } +- +- @Override +- public Function simpleNativeHandler() +- { +- return new Function() +- { +- +- @Override +- public byte[][] apply(CqlResult result) +- { +- byte[][] r = new byte[result.getRows().size()][]; +- for (int i = 0 ; i < r.length ; i++) +- r[i] = result.getRows().get(i).getKey(); +- return r; +- } +- }; +- } +- + } + + private static String getUnQuotedCqlBlob(ByteBuffer term) +diff --git a/tools/stress/src/org/apache/cassandra/stress/operations/predefined/PredefinedOperation.java b/tools/stress/src/org/apache/cassandra/stress/operations/predefined/PredefinedOperation.java +index 1f9a2c8..81b5eba 100644 +--- a/tools/stress/src/org/apache/cassandra/stress/operations/predefined/PredefinedOperation.java ++++ b/tools/stress/src/org/apache/cassandra/stress/operations/predefined/PredefinedOperation.java +@@ -30,8 +30,6 @@ import org.apache.cassandra.stress.settings.Command; + import org.apache.cassandra.stress.settings.CqlVersion; + import org.apache.cassandra.stress.settings.StressSettings; + import org.apache.cassandra.stress.util.Timer; +-import org.apache.cassandra.thrift.SlicePredicate; +-import org.apache.cassandra.thrift.SliceRange; + + public abstract class PredefinedOperation extends PartitionOperation + { +@@ -100,24 +98,6 @@ public abstract class PredefinedOperation extends PartitionOperation + { + return indices != null ? 
indices.length : ub - lb; + } +- +- SlicePredicate predicate() +- { +- final SlicePredicate predicate = new SlicePredicate(); +- if (indices == null) +- { +- predicate.setSlice_range(new SliceRange() +- .setStart(settings.columns.names.get(lb)) +- .setFinish(EMPTY_BYTE_ARRAY) +- .setReversed(false) +- .setCount(count()) +- ); +- } +- else +- predicate.setColumn_names(select(settings.columns.names)); +- return predicate; +- +- } + } + + public String toString() +@@ -185,57 +165,14 @@ public abstract class PredefinedOperation extends PartitionOperation + switch (type) + { + case READ: +- switch(settings.mode.style) +- { +- case THRIFT: +- return new ThriftReader(timer, generator, seedManager, settings); +- case CQL: +- case CQL_PREPARED: +- return new CqlReader(timer, generator, seedManager, settings); +- default: +- throw new UnsupportedOperationException(); +- } +- +- ++ return new CqlReader(timer, generator, seedManager, settings); + case COUNTER_READ: +- switch(settings.mode.style) +- { +- case THRIFT: +- return new ThriftCounterGetter(timer, generator, seedManager, settings); +- case CQL: +- case CQL_PREPARED: +- return new CqlCounterGetter(timer, generator, seedManager, settings); +- default: +- throw new UnsupportedOperationException(); +- } +- ++ return new CqlCounterGetter(timer, generator, seedManager, settings); + case WRITE: +- +- switch(settings.mode.style) +- { +- case THRIFT: +- return new ThriftInserter(timer, generator, seedManager, settings); +- case CQL: +- case CQL_PREPARED: +- return new CqlInserter(timer, generator, seedManager, settings); +- default: +- throw new UnsupportedOperationException(); +- } +- ++ return new CqlInserter(timer, generator, seedManager, settings); + case COUNTER_WRITE: +- switch(settings.mode.style) +- { +- case THRIFT: +- return new ThriftCounterAdder(counteradd, timer, generator, seedManager, settings); +- case CQL: +- case CQL_PREPARED: +- return new CqlCounterAdder(counteradd, timer, generator, seedManager, settings); 
+- default: +- throw new UnsupportedOperationException(); +- } +- ++ return new CqlCounterAdder(counteradd, timer, generator, seedManager, settings); + } +- + throw new UnsupportedOperationException(); + } + +diff --git a/tools/stress/src/org/apache/cassandra/stress/operations/predefined/ThriftCounterAdder.java b/tools/stress/src/org/apache/cassandra/stress/operations/predefined/ThriftCounterAdder.java +deleted file mode 100644 +index be34a07..0000000 +--- a/tools/stress/src/org/apache/cassandra/stress/operations/predefined/ThriftCounterAdder.java ++++ /dev/null +@@ -1,94 +0,0 @@ +-/** +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. 
+- */ +-package org.apache.cassandra.stress.operations.predefined; +- +-import java.io.IOException; +-import java.nio.ByteBuffer; +-import java.util.ArrayList; +-import java.util.Collections; +-import java.util.List; +-import java.util.Map; +- +-import org.apache.cassandra.stress.generate.Distribution; +-import org.apache.cassandra.stress.generate.DistributionFactory; +-import org.apache.cassandra.stress.generate.PartitionGenerator; +-import org.apache.cassandra.stress.generate.SeedManager; +-import org.apache.cassandra.stress.settings.Command; +-import org.apache.cassandra.stress.settings.StressSettings; +-import org.apache.cassandra.stress.util.ThriftClient; +-import org.apache.cassandra.stress.util.Timer; +-import org.apache.cassandra.thrift.ColumnOrSuperColumn; +-import org.apache.cassandra.thrift.CounterColumn; +-import org.apache.cassandra.thrift.Mutation; +- +-public class ThriftCounterAdder extends PredefinedOperation +-{ +- +- final Distribution counteradd; +- public ThriftCounterAdder(DistributionFactory counteradd, Timer timer, PartitionGenerator generator, SeedManager seedManager, StressSettings settings) +- { +- super(Command.COUNTER_WRITE, timer, generator, seedManager, settings); +- this.counteradd = counteradd.get(); +- } +- +- public boolean isWrite() +- { +- return true; +- } +- +- public void run(final ThriftClient client) throws IOException +- { +- List columns = new ArrayList<>(); +- for (ByteBuffer name : select().select(settings.columns.names)) +- columns.add(new CounterColumn(name, counteradd.next())); +- +- List mutations = new ArrayList<>(columns.size()); +- for (CounterColumn c : columns) +- { +- ColumnOrSuperColumn cosc = new ColumnOrSuperColumn().setCounter_column(c); +- mutations.add(new Mutation().setColumn_or_supercolumn(cosc)); +- } +- Map> row = Collections.singletonMap(type.table, mutations); +- +- final ByteBuffer key = getKey(); +- final Map>> record = Collections.singletonMap(key, row); +- +- timeWithRetry(new RunOp() +- { +- 
@Override +- public boolean run() throws Exception +- { +- client.batch_mutate(record, settings.command.consistencyLevel); +- return true; +- } +- +- @Override +- public int partitionCount() +- { +- return 1; +- } +- +- @Override +- public int rowCount() +- { +- return 1; +- } +- }); +- } +- +-} +diff --git a/tools/stress/src/org/apache/cassandra/stress/operations/predefined/ThriftCounterGetter.java b/tools/stress/src/org/apache/cassandra/stress/operations/predefined/ThriftCounterGetter.java +deleted file mode 100644 +index ca81fe9..0000000 +--- a/tools/stress/src/org/apache/cassandra/stress/operations/predefined/ThriftCounterGetter.java ++++ /dev/null +@@ -1,67 +0,0 @@ +-/** +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. 
+- */ +-package org.apache.cassandra.stress.operations.predefined; +- +-import java.io.IOException; +-import java.nio.ByteBuffer; +-import java.util.List; +- +-import org.apache.cassandra.stress.generate.PartitionGenerator; +-import org.apache.cassandra.stress.generate.SeedManager; +-import org.apache.cassandra.stress.settings.Command; +-import org.apache.cassandra.stress.settings.StressSettings; +-import org.apache.cassandra.stress.util.ThriftClient; +-import org.apache.cassandra.stress.util.Timer; +-import org.apache.cassandra.thrift.ColumnParent; +-import org.apache.cassandra.thrift.SlicePredicate; +- +-public class ThriftCounterGetter extends PredefinedOperation +-{ +- public ThriftCounterGetter(Timer timer, PartitionGenerator generator, SeedManager seedManager, StressSettings settings) +- { +- super(Command.COUNTER_READ, timer, generator, seedManager, settings); +- } +- +- public void run(final ThriftClient client) throws IOException +- { +- final SlicePredicate predicate = select().predicate(); +- final ByteBuffer key = getKey(); +- timeWithRetry(new RunOp() +- { +- @Override +- public boolean run() throws Exception +- { +- List r = client.get_slice(key, new ColumnParent(type.table), predicate, settings.command.consistencyLevel); +- return r != null && r.size() > 0; +- } +- +- @Override +- public int partitionCount() +- { +- return 1; +- } +- +- @Override +- public int rowCount() +- { +- return 1; +- } +- }); +- } +- +-} +diff --git a/tools/stress/src/org/apache/cassandra/stress/operations/predefined/ThriftInserter.java b/tools/stress/src/org/apache/cassandra/stress/operations/predefined/ThriftInserter.java +deleted file mode 100644 +index 1827c06..0000000 +--- a/tools/stress/src/org/apache/cassandra/stress/operations/predefined/ThriftInserter.java ++++ /dev/null +@@ -1,104 +0,0 @@ +-/** +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. 
See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. +- */ +-package org.apache.cassandra.stress.operations.predefined; +- +-import java.io.IOException; +-import java.nio.ByteBuffer; +-import java.util.ArrayList; +-import java.util.Collections; +-import java.util.List; +-import java.util.Map; +- +-import org.apache.cassandra.stress.generate.PartitionGenerator; +-import org.apache.cassandra.stress.generate.SeedManager; +-import org.apache.cassandra.stress.settings.Command; +-import org.apache.cassandra.stress.settings.StressSettings; +-import org.apache.cassandra.stress.util.ThriftClient; +-import org.apache.cassandra.stress.util.Timer; +-import org.apache.cassandra.thrift.Column; +-import org.apache.cassandra.thrift.ColumnOrSuperColumn; +-import org.apache.cassandra.thrift.Mutation; +-import org.apache.cassandra.utils.FBUtilities; +- +-public final class ThriftInserter extends PredefinedOperation +-{ +- +- public ThriftInserter(Timer timer, PartitionGenerator generator, SeedManager seedManager, StressSettings settings) +- { +- super(Command.WRITE, timer, generator, seedManager, settings); +- } +- +- public boolean isWrite() +- { +- return true; +- } +- +- public void run(final ThriftClient client) throws IOException +- { +- final ByteBuffer key = getKey(); +- final List columns = getColumns(); +- +- List mutations = 
new ArrayList<>(columns.size()); +- for (Column c : columns) +- { +- ColumnOrSuperColumn column = new ColumnOrSuperColumn().setColumn(c); +- mutations.add(new Mutation().setColumn_or_supercolumn(column)); +- } +- Map> row = Collections.singletonMap(type.table, mutations); +- +- final Map>> record = Collections.singletonMap(key, row); +- +- timeWithRetry(new RunOp() +- { +- @Override +- public boolean run() throws Exception +- { +- client.batch_mutate(record, settings.command.consistencyLevel); +- return true; +- } +- +- @Override +- public int partitionCount() +- { +- return 1; +- } +- +- @Override +- public int rowCount() +- { +- return 1; +- } +- }); +- } +- +- protected List getColumns() +- { +- final ColumnSelection selection = select(); +- final List values = getColumnValues(selection); +- final List columns = new ArrayList<>(values.size()); +- final List names = select().select(settings.columns.names); +- for (int i = 0 ; i < values.size() ; i++) +- columns.add(new Column(names.get(i)) +- .setValue(values.get(i)) +- .setTimestamp(settings.columns.timestamp != null +- ? Long.parseLong(settings.columns.timestamp) +- : FBUtilities.timestampMicros())); +- return columns; +- } +- +-} +diff --git a/tools/stress/src/org/apache/cassandra/stress/operations/predefined/ThriftReader.java b/tools/stress/src/org/apache/cassandra/stress/operations/predefined/ThriftReader.java +deleted file mode 100644 +index d77dc6a..0000000 +--- a/tools/stress/src/org/apache/cassandra/stress/operations/predefined/ThriftReader.java ++++ /dev/null +@@ -1,78 +0,0 @@ +-/** +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. 
You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. +- */ +-package org.apache.cassandra.stress.operations.predefined; +- +-import java.io.IOException; +-import java.nio.ByteBuffer; +-import java.util.List; +- +-import org.apache.cassandra.stress.generate.PartitionGenerator; +-import org.apache.cassandra.stress.generate.SeedManager; +-import org.apache.cassandra.stress.settings.Command; +-import org.apache.cassandra.stress.settings.StressSettings; +-import org.apache.cassandra.stress.util.ThriftClient; +-import org.apache.cassandra.stress.util.Timer; +-import org.apache.cassandra.thrift.ColumnOrSuperColumn; +-import org.apache.cassandra.thrift.ColumnParent; +- +-public final class ThriftReader extends PredefinedOperation +-{ +- +- public ThriftReader(Timer timer, PartitionGenerator generator, SeedManager seedManager, StressSettings settings) +- { +- super(Command.READ, timer, generator, seedManager, settings); +- } +- +- public void run(final ThriftClient client) throws IOException +- { +- final ColumnSelection select = select(); +- final ByteBuffer key = getKey(); +- final List expect = getColumnValues(select); +- timeWithRetry(new RunOp() +- { +- @Override +- public boolean run() throws Exception +- { +- List row = client.get_slice(key, new ColumnParent(type.table), select.predicate(), settings.command.consistencyLevel); +- if (expect == null) +- return !row.isEmpty(); +- if (row == null) +- return false; +- if (row.size() != expect.size()) +- return false; +- for (int i = 0 ; i < row.size() ; i++) +- if (!row.get(i).getColumn().bufferForValue().equals(expect.get(i))) +- return false; +- 
return true; +- } +- +- @Override +- public int partitionCount() +- { +- return 1; +- } +- +- @Override +- public int rowCount() +- { +- return 1; +- } +- }); +- } +- +-} +diff --git a/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/SchemaInsert.java b/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/SchemaInsert.java +index d9fcac8..df3362e 100644 +--- a/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/SchemaInsert.java ++++ b/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/SchemaInsert.java +@@ -24,6 +24,7 @@ package org.apache.cassandra.stress.operations.userdefined; + import java.io.IOException; + import java.util.ArrayList; + import java.util.List; ++import java.util.stream.Collectors; + + import com.datastax.driver.core.BatchStatement; + import com.datastax.driver.core.BoundStatement; +@@ -33,7 +34,6 @@ import org.apache.cassandra.db.ConsistencyLevel; + import org.apache.cassandra.stress.generate.*; + import org.apache.cassandra.stress.settings.StressSettings; + import org.apache.cassandra.stress.util.JavaDriverClient; +-import org.apache.cassandra.stress.util.ThriftClient; + import org.apache.cassandra.stress.util.Timer; + + public class SchemaInsert extends SchemaStatement +@@ -41,9 +41,9 @@ public class SchemaInsert extends SchemaStatement + + private final BatchStatement.Type batchType; + +- public SchemaInsert(Timer timer, StressSettings settings, PartitionGenerator generator, SeedManager seedManager, Distribution batchSize, RatioDistribution useRatio, RatioDistribution rowPopulation, Integer thriftId, PreparedStatement statement, ConsistencyLevel cl, BatchStatement.Type batchType) ++ public SchemaInsert(Timer timer, StressSettings settings, PartitionGenerator generator, SeedManager seedManager, Distribution batchSize, RatioDistribution useRatio, RatioDistribution rowPopulation, PreparedStatement statement, ConsistencyLevel cl, BatchStatement.Type batchType) + { +- super(timer, 
settings, new DataSpec(generator, seedManager, batchSize, useRatio, rowPopulation), statement, thriftId, cl); ++ super(timer, settings, new DataSpec(generator, seedManager, batchSize, useRatio, rowPopulation), statement, statement.getVariables().asList().stream().map(d -> d.getName()).collect(Collectors.toList()), cl); + this.batchType = batchType; + } + +@@ -90,29 +90,6 @@ public class SchemaInsert extends SchemaStatement + } + } + +- private class ThriftRun extends Runner +- { +- final ThriftClient client; +- +- private ThriftRun(ThriftClient client) +- { +- this.client = client; +- } +- +- public boolean run() throws Exception +- { +- for (PartitionIterator iterator : partitions) +- { +- while (iterator.hasNext()) +- { +- client.execute_prepared_cql3_query(thriftId, iterator.getToken(), thriftRowArgs(iterator.next()), settings.command.consistencyLevel); +- rowCount += 1; +- } +- } +- return true; +- } +- } +- + @Override + public void run(JavaDriverClient client) throws IOException + { +@@ -123,11 +100,4 @@ public class SchemaInsert extends SchemaStatement + { + return true; + } +- +- @Override +- public void run(ThriftClient client) throws IOException +- { +- timeWithRetry(new ThriftRun(client)); +- } +- + } +diff --git a/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/SchemaQuery.java b/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/SchemaQuery.java +index 9b5c4ae..19cc959 100644 +--- a/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/SchemaQuery.java ++++ b/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/SchemaQuery.java +@@ -26,6 +26,7 @@ import java.nio.ByteBuffer; + import java.util.ArrayList; + import java.util.List; + import java.util.Random; ++import java.util.stream.Collectors; + + import com.datastax.driver.core.BoundStatement; + import com.datastax.driver.core.PreparedStatement; +@@ -34,10 +35,7 @@ import org.apache.cassandra.db.ConsistencyLevel; + import 
org.apache.cassandra.stress.generate.*; + import org.apache.cassandra.stress.settings.StressSettings; + import org.apache.cassandra.stress.util.JavaDriverClient; +-import org.apache.cassandra.stress.util.ThriftClient; + import org.apache.cassandra.stress.util.Timer; +-import org.apache.cassandra.thrift.CqlResult; +-import org.apache.cassandra.thrift.ThriftConversion; + + public class SchemaQuery extends SchemaStatement + { +@@ -51,9 +49,10 @@ public class SchemaQuery extends SchemaStatement + final Object[][] randomBuffer; + final Random random = new Random(); + +- public SchemaQuery(Timer timer, StressSettings settings, PartitionGenerator generator, SeedManager seedManager, Integer thriftId, PreparedStatement statement, ConsistencyLevel cl, ArgSelect argSelect) ++ public SchemaQuery(Timer timer, StressSettings settings, PartitionGenerator generator, SeedManager seedManager, PreparedStatement statement, ConsistencyLevel cl, ArgSelect argSelect) + { +- super(timer, settings, new DataSpec(generator, seedManager, new DistributionFixed(1), settings.insert.rowPopulationRatio.get(), argSelect == ArgSelect.MULTIROW ? statement.getVariables().size() : 1), statement, thriftId, cl); ++ super(timer, settings, new DataSpec(generator, seedManager, new DistributionFixed(1), settings.insert.rowPopulationRatio.get(), argSelect == ArgSelect.MULTIROW ? 
statement.getVariables().size() : 1), statement, ++ statement.getVariables().asList().stream().map(d -> d.getName()).collect(Collectors.toList()), cl); + this.argSelect = argSelect; + randomBuffer = new Object[argumentIndex.length][argumentIndex.length]; + } +@@ -76,24 +75,6 @@ public class SchemaQuery extends SchemaStatement + } + } + +- private class ThriftRun extends Runner +- { +- final ThriftClient client; +- +- private ThriftRun(ThriftClient client) +- { +- this.client = client; +- } +- +- public boolean run() throws Exception +- { +- CqlResult rs = client.execute_prepared_cql3_query(thriftId, partitions.get(0).getToken(), thriftArgs(), ThriftConversion.toThrift(cl)); +- rowCount = rs.getRowsSize(); +- partitionCount = Math.min(1, rowCount); +- return true; +- } +- } +- + private int fillRandom() + { + int c = 0; +@@ -130,36 +111,9 @@ public class SchemaQuery extends SchemaStatement + } + } + +- List thriftArgs() +- { +- switch (argSelect) +- { +- case MULTIROW: +- List args = new ArrayList<>(); +- int c = fillRandom(); +- for (int i = 0 ; i < argumentIndex.length ; i++) +- { +- int argIndex = argumentIndex[i]; +- args.add(spec.partitionGenerator.convert(argIndex, randomBuffer[argIndex < 0 ? 
0 : random.nextInt(c)][i])); +- } +- return args; +- case SAMEROW: +- return thriftRowArgs(partitions.get(0).next()); +- default: +- throw new IllegalStateException(); +- } +- } +- + @Override + public void run(JavaDriverClient client) throws IOException + { + timeWithRetry(new JavaDriverRun(client)); + } +- +- @Override +- public void run(ThriftClient client) throws IOException +- { +- timeWithRetry(new ThriftRun(client)); +- } +- + } +diff --git a/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/SchemaStatement.java b/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/SchemaStatement.java +index 166d689..214b2bb 100644 +--- a/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/SchemaStatement.java ++++ b/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/SchemaStatement.java +@@ -41,18 +41,16 @@ public abstract class SchemaStatement extends PartitionOperation + { + + final PreparedStatement statement; +- final Integer thriftId; + final ConsistencyLevel cl; + final int[] argumentIndex; + final Object[] bindBuffer; + final ColumnDefinitions definitions; + + public SchemaStatement(Timer timer, StressSettings settings, DataSpec spec, +- PreparedStatement statement, Integer thriftId, ConsistencyLevel cl) ++ PreparedStatement statement, List bindNames, ConsistencyLevel cl) + { + super(timer, settings, spec); + this.statement = statement; +- this.thriftId = thriftId; + this.cl = cl; + argumentIndex = new int[statement.getVariables().size()]; + bindBuffer = new Object[argumentIndex.length]; +@@ -81,11 +79,12 @@ public abstract class SchemaStatement extends PartitionOperation + return statement.bind(bindBuffer); + } + +- List thriftRowArgs(Row row) ++ List rowArgs(Row row) + { + List args = new ArrayList<>(); + for (int i : argumentIndex) +- args.add(spec.partitionGenerator.convert(i, row.get(i))); ++ args.add(spec.partitionGenerator.convert(i, ++ row.get(i))); + return args; + } + +diff --git 
a/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/TokenRangeQuery.java b/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/TokenRangeQuery.java +index 198f1f5..f5dfd86 100644 +--- a/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/TokenRangeQuery.java ++++ b/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/TokenRangeQuery.java +@@ -43,7 +43,6 @@ import org.apache.cassandra.stress.WorkManager; + import org.apache.cassandra.stress.generate.TokenRangeIterator; + import org.apache.cassandra.stress.settings.StressSettings; + import org.apache.cassandra.stress.util.JavaDriverClient; +-import org.apache.cassandra.stress.util.ThriftClient; + import org.apache.cassandra.stress.util.Timer; + + public class TokenRangeQuery extends Operation +@@ -220,34 +219,12 @@ public class TokenRangeQuery extends Operation + return ret.toString(); + } + +- private static class ThriftRun extends Runner +- { +- final ThriftClient client; +- +- private ThriftRun(ThriftClient client) +- { +- this.client = client; +- } +- +- public boolean run() throws Exception +- { +- throw new OperationNotSupportedException("Bulk read over thrift not supported"); +- } +- } +- +- + @Override + public void run(JavaDriverClient client) throws IOException + { + timeWithRetry(new JavaDriverRun(client)); + } + +- @Override +- public void run(ThriftClient client) throws IOException +- { +- timeWithRetry(new ThriftRun(client)); +- } +- + public int ready(WorkManager workManager) + { + tokenRangeIterator.update(); +diff --git a/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/ValidatingSchemaQuery.java b/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/ValidatingSchemaQuery.java +index 33f6f80..fc1f47d 100644 +--- a/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/ValidatingSchemaQuery.java ++++ 
b/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/ValidatingSchemaQuery.java +@@ -36,14 +36,8 @@ import org.apache.cassandra.stress.generate.Row; + import org.apache.cassandra.stress.operations.PartitionOperation; + import org.apache.cassandra.stress.settings.StressSettings; + import org.apache.cassandra.stress.util.JavaDriverClient; +-import org.apache.cassandra.stress.util.ThriftClient; + import org.apache.cassandra.stress.util.Timer; +-import org.apache.cassandra.thrift.Compression; +-import org.apache.cassandra.thrift.CqlResult; +-import org.apache.cassandra.thrift.CqlRow; +-import org.apache.cassandra.thrift.ThriftConversion; + import org.apache.cassandra.utils.Pair; +-import org.apache.thrift.TException; + + public class ValidatingSchemaQuery extends PartitionOperation + { +@@ -151,50 +145,6 @@ public class ValidatingSchemaQuery extends PartitionOperation + } + } + +- private class ThriftRun extends Runner +- { +- final ThriftClient client; +- +- private ThriftRun(ThriftClient client, PartitionIterator iter) +- { +- super(iter); +- this.client = client; +- } +- +- public boolean run() throws Exception +- { +- CqlResult rs = client.execute_prepared_cql3_query(statements[statementIndex].thriftId, partitions.get(0).getToken(), thriftArgs(), ThriftConversion.toThrift(cl)); +- int[] valueIndex = new int[rs.getSchema().name_types.size()]; +- for (int i = 0 ; i < valueIndex.length ; i++) +- valueIndex[i] = spec.partitionGenerator.indexOf(rs.fieldForId(i).getFieldName()); +- int r = 0; +- if (!statements[statementIndex].inclusiveStart && iter.hasNext()) +- iter.next(); +- while (iter.hasNext()) +- { +- Row expectedRow = iter.next(); +- if (!statements[statementIndex].inclusiveEnd && !iter.hasNext()) +- break; +- +- if (r == rs.num) +- return false; +- +- rowCount++; +- CqlRow actualRow = rs.getRows().get(r++); +- for (int i = 0 ; i < actualRow.getColumnsSize() ; i++) +- { +- ByteBuffer expectedValue = spec.partitionGenerator.convert(valueIndex[i], 
expectedRow.get(valueIndex[i])); +- ByteBuffer actualValue = actualRow.getColumns().get(i).value; +- if (!expectedValue.equals(actualValue)) +- return false; +- } +- } +- assert r == rs.num; +- partitionCount = Math.min(1, rowCount); +- return true; +- } +- } +- + BoundStatement bind(int statementIndex) + { + int pkc = bounds.left.partitionKey.length; +@@ -205,32 +155,12 @@ public class ValidatingSchemaQuery extends PartitionOperation + return statements[statementIndex].statement.bind(bindBuffer); + } + +- List thriftArgs() +- { +- List args = new ArrayList<>(); +- int pkc = bounds.left.partitionKey.length; +- for (int i = 0 ; i < pkc ; i++) +- args.add(spec.partitionGenerator.convert(-i, bounds.left.partitionKey[i])); +- int ccc = bounds.left.row.length; +- for (int i = 0 ; i < ccc ; i++) +- args.add(spec.partitionGenerator.convert(i, bounds.left.get(i))); +- for (int i = 0 ; i < ccc ; i++) +- args.add(spec.partitionGenerator.convert(i, bounds.right.get(i))); +- return args; +- } +- + @Override + public void run(JavaDriverClient client) throws IOException + { + timeWithRetry(new JavaDriverRun(client, partitions.get(0))); + } + +- @Override +- public void run(ThriftClient client) throws IOException +- { +- timeWithRetry(new ThriftRun(client, partitions.get(0))); +- } +- + public static class Factory + { + final ValidatingStatement[] statements; +@@ -311,13 +241,11 @@ public class ValidatingSchemaQuery extends PartitionOperation + private static class ValidatingStatement + { + final PreparedStatement statement; +- final Integer thriftId; + final boolean inclusiveStart; + final boolean inclusiveEnd; +- private ValidatingStatement(PreparedStatement statement, Integer thriftId, boolean inclusiveStart, boolean inclusiveEnd) ++ private ValidatingStatement(PreparedStatement statement, boolean inclusiveStart, boolean inclusiveEnd) + { + this.statement = statement; +- this.thriftId = thriftId; + this.inclusiveStart = inclusiveStart; + this.inclusiveEnd = inclusiveEnd; + } 
+@@ -326,16 +254,7 @@ public class ValidatingSchemaQuery extends PartitionOperation + private static ValidatingStatement prepare(StressSettings settings, String cql, boolean incLb, boolean incUb) + { + JavaDriverClient jclient = settings.getJavaDriverClient(); +- ThriftClient tclient = settings.getThriftClient(); + PreparedStatement statement = jclient.prepare(cql); +- try +- { +- Integer thriftId = tclient.prepare_cql3_query(cql, Compression.NONE); +- return new ValidatingStatement(statement, thriftId, incLb, incUb); +- } +- catch (TException e) +- { +- throw new RuntimeException(e); +- } ++ return new ValidatingStatement(statement, incLb, incUb); + } + } +diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/CliOption.java b/tools/stress/src/org/apache/cassandra/stress/settings/CliOption.java +index 36284ab..018669a 100644 +--- a/tools/stress/src/org/apache/cassandra/stress/settings/CliOption.java ++++ b/tools/stress/src/org/apache/cassandra/stress/settings/CliOption.java +@@ -30,7 +30,7 @@ public enum CliOption + INSERT("Insert specific options relating to various methods for batching and splitting partition updates", SettingsInsert.helpPrinter()), + COL("Column details such as size and count distribution, data generator, names, comparator and if super columns should be used", SettingsColumn.helpPrinter()), + RATE("Thread count, rate limit or automatic mode (default is auto)", SettingsRate.helpPrinter()), +- MODE("Thrift or CQL with options", SettingsMode.helpPrinter()), ++ MODE("CQL mode options", SettingsMode.helpPrinter()), + ERRORS("How to handle errors when encountered during stress", SettingsErrors.helpPrinter()), + SCHEMA("Replication settings, compression, compaction, etc.", SettingsSchema.helpPrinter()), + NODE("Nodes to connect to", SettingsNode.helpPrinter()), +diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/ConnectionAPI.java b/tools/stress/src/org/apache/cassandra/stress/settings/ConnectionAPI.java +index 
942250f..554c16b 100644 +--- a/tools/stress/src/org/apache/cassandra/stress/settings/ConnectionAPI.java ++++ b/tools/stress/src/org/apache/cassandra/stress/settings/ConnectionAPI.java +@@ -23,6 +23,6 @@ package org.apache.cassandra.stress.settings; + + public enum ConnectionAPI + { +- THRIFT, THRIFT_SMART, SIMPLE_NATIVE, JAVA_DRIVER_NATIVE ++ SIMPLE_NATIVE, JAVA_DRIVER_NATIVE + } + +diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/ConnectionStyle.java b/tools/stress/src/org/apache/cassandra/stress/settings/ConnectionStyle.java +index 6b408a9..1884cc8 100644 +--- a/tools/stress/src/org/apache/cassandra/stress/settings/ConnectionStyle.java ++++ b/tools/stress/src/org/apache/cassandra/stress/settings/ConnectionStyle.java +@@ -1,6 +1,4 @@ +-package org.apache.cassandra.stress.settings; + /* +- * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information +@@ -8,23 +6,21 @@ package org.apache.cassandra.stress.settings; + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at +- * ++ * + * http://www.apache.org/licenses/LICENSE-2.0 +- * ++ * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+- * + */ +- ++package org.apache.cassandra.stress.settings; + + public enum ConnectionStyle + { + CQL, +- CQL_PREPARED, +- THRIFT ++ CQL_PREPARED + } + +diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/Legacy.java b/tools/stress/src/org/apache/cassandra/stress/settings/Legacy.java +index 70693af..f9cbe8e 100644 +--- a/tools/stress/src/org/apache/cassandra/stress/settings/Legacy.java ++++ b/tools/stress/src/org/apache/cassandra/stress/settings/Legacy.java +@@ -51,7 +51,6 @@ public class Legacy implements Serializable + availableOptions.addOption("s", "stdev", true, "Standard Deviation for gaussian read key generation, default:0.1"); + availableOptions.addOption("r", "random", false, "Use random key generator for read key generation (STDEV will have no effect), default:false"); + availableOptions.addOption("f", "file", true, "Write output to given file"); +- availableOptions.addOption("p", "port", true, "Thrift port, default:9160"); + availableOptions.addOption("o", "operation", true, "Operation to perform (WRITE, READ, READWRITE, RANGE_SLICE, INDEXED_RANGE_SLICE, MULTI_GET, COUNTERWRITE, COUNTER_GET), default:WRITE"); + availableOptions.addOption("u", "supercolumns", true, "Number of super columns per key, default:1"); + availableOptions.addOption("y", "family-type", true, "Column Family Type (Super, Standard), default:Standard"); +@@ -60,8 +59,6 @@ public class Legacy implements Serializable + availableOptions.addOption("i", "progress-interval", true, "Progress Report Interval (seconds), default:10"); + availableOptions.addOption("g", "keys-per-call", true, "Number of keys to get_range_slices or multiget per call, default:1000"); + availableOptions.addOption("l", "replication-factor", true, "Replication Factor to use when creating needed column families, default:1"); +- availableOptions.addOption("L3", "enable-cql3", false, "Perform queries using CQL3 (Cassandra Query Language v 3.0.0)"); +- availableOptions.addOption("b", 
"enable-native-protocol", false, "Use the binary native protocol (only work along with -L3)"); + availableOptions.addOption("P", "use-prepared-statements", false, "Perform queries using prepared statements (only applicable to CQL)."); + availableOptions.addOption("e", "consistency-level", true, "Consistency Level to use (ONE, QUORUM, LOCAL_QUORUM, EACH_QUORUM, ALL, ANY), default:ONE"); + availableOptions.addOption("x", "create-index", true, "Type of index to create on needed column families (KEYS)"); +@@ -73,7 +70,6 @@ public class Legacy implements Serializable + availableOptions.addOption("Q", "query-names", true, "Comma-separated list of column names to retrieve from each row."); + availableOptions.addOption("Z", "compaction-strategy", true, "CompactionStrategy to use."); + availableOptions.addOption("U", "comparator", true, "Column Comparator to use. Currently supported types are: TimeUUIDType, AsciiType, UTF8Type."); +- availableOptions.addOption("tf", "transport-factory", true, "Fully-qualified TTransportFactory class name for creating a connection. Note: For Thrift over SSL, use org.apache.cassandra.stress.SSLTransportFactory."); + availableOptions.addOption("ns", "no-statistics", false, "Turn off the aggegate statistics that is normally output after completion."); + availableOptions.addOption("ts", SSL_TRUSTSTORE, true, "SSL: full path to truststore"); + availableOptions.addOption("tspw", SSL_TRUSTSTORE_PW, true, "SSL: full path to truststore"); +@@ -231,10 +227,7 @@ public class Legacy implements Serializable + r.add("-schema", "replication(" + rep + ")"); + } + +- if (cmd.hasOption("L3")) +- r.add("-mode", (cmd.hasOption("P") ? "prepared" : "") + (cmd.hasOption("b") ? "native" : "") + "cql3"); +- else +- r.add("-mode", "thrift"); ++ r.add("-mode", (cmd.hasOption("P") ? 
"prepared" : "") + "native" + "cql3"); + + if (cmd.hasOption("I")) + r.add("-schema", "compression=" + cmd.getOptionValue("I")); +@@ -255,9 +248,6 @@ public class Legacy implements Serializable + if (cmd.hasOption("ns")) + r.add("-log", "no-summary"); + +- if (cmd.hasOption("tf")) +- r.add("-transport", "factory=" + cmd.getOptionValue("tf")); +- + if(cmd.hasOption(SSL_TRUSTSTORE)) + r.add("-transport", "truststore=" + cmd.getOptionValue(SSL_TRUSTSTORE)); + +diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsColumn.java b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsColumn.java +index bf78bf9..3b55feb 100644 +--- a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsColumn.java ++++ b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsColumn.java +@@ -40,7 +40,6 @@ import org.apache.cassandra.utils.ByteBufferUtil; + */ + public class SettingsColumn implements Serializable + { +- + public final int maxColumnsPerKey; + public transient List names; + public final List namestrs; +diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommand.java b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommand.java +index bfd8529..dc16a14 100644 +--- a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommand.java ++++ b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommand.java +@@ -31,7 +31,7 @@ import com.google.common.util.concurrent.Uninterruptibles; + + import org.apache.cassandra.stress.operations.OpDistributionFactory; + import org.apache.cassandra.stress.util.JavaDriverClient; +-import org.apache.cassandra.thrift.ConsistencyLevel; ++import org.apache.cassandra.db.ConsistencyLevel; + + // Generic command settings - common to read/write/etc + public abstract class SettingsCommand implements Serializable +diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsMode.java 
b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsMode.java +index 8f0ab25..7a6c69f 100644 +--- a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsMode.java ++++ b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsMode.java +@@ -58,7 +58,7 @@ public class SettingsMode implements Serializable + protocolVersion = "NEWEST_SUPPORTED".equals(opts.protocolVersion.value()) + ? ProtocolVersion.NEWEST_SUPPORTED + : ProtocolVersion.fromInt(Integer.parseInt(opts.protocolVersion.value())); +- api = opts.mode().displayPrefix.equals("native") ? ConnectionAPI.JAVA_DRIVER_NATIVE : ConnectionAPI.THRIFT; ++ api = ConnectionAPI.JAVA_DRIVER_NATIVE; + style = opts.useUnPrepared.setByUser() ? ConnectionStyle.CQL : ConnectionStyle.CQL_PREPARED; + compression = ProtocolOptions.Compression.valueOf(opts.useCompression.value().toUpperCase()).name(); + username = opts.user.value(); +@@ -108,21 +108,6 @@ public class SettingsMode implements Serializable + maxPendingPerConnection = null; + connectionsPerHost = null; + } +- else if (options instanceof ThriftOptions) +- { +- ThriftOptions opts = (ThriftOptions) options; +- protocolVersion = ProtocolVersion.NEWEST_SUPPORTED; +- cqlVersion = CqlVersion.NOCQL; +- api = opts.smart.setByUser() ? 
ConnectionAPI.THRIFT_SMART : ConnectionAPI.THRIFT; +- style = ConnectionStyle.THRIFT; +- compression = ProtocolOptions.Compression.NONE.name(); +- username = opts.user.value(); +- password = opts.password.value(); +- authProviderClassname = null; +- authProvider = null; +- maxPendingPerConnection = null; +- connectionsPerHost = null; +- } + else + throw new IllegalStateException(); + } +@@ -143,15 +128,6 @@ public class SettingsMode implements Serializable + } + } + +- private static final class Cql3ThriftOptions extends Cql3Options +- { +- final OptionSimple mode = new OptionSimple("thrift", "", null, "", true); +- OptionSimple mode() +- { +- return mode; +- } +- } +- + private static abstract class Cql3Options extends GroupedOptions + { + final OptionSimple api = new OptionSimple("cql3", "", null, "", true); +@@ -174,7 +150,6 @@ public class SettingsMode implements Serializable + } + } + +- + private static final class Cql3SimpleNativeOptions extends GroupedOptions + { + final OptionSimple api = new OptionSimple("cql3", "", null, "", true); +@@ -189,21 +164,6 @@ public class SettingsMode implements Serializable + } + } + +- private static final class ThriftOptions extends GroupedOptions +- { +- final OptionSimple api = new OptionSimple("thrift", "", null, "", true); +- final OptionSimple smart = new OptionSimple("smart", "", null, "", false); +- final OptionSimple user = new OptionSimple("user=", ".+", null, "username", false); +- final OptionSimple password = new OptionSimple("password=", ".+", null, "password", false); +- +- +- @Override +- public List options() +- { +- return Arrays.asList(api, smart, user, password); +- } +- } +- + // CLI Utility Methods + + public static SettingsMode get(Map clArgs) +@@ -218,7 +178,7 @@ public class SettingsMode implements Serializable + return new SettingsMode(opts); + } + +- GroupedOptions options = GroupedOptions.select(params, new ThriftOptions(), new Cql3NativeOptions(), new Cql3SimpleNativeOptions()); ++ GroupedOptions 
options = GroupedOptions.select(params, new Cql3NativeOptions(), new Cql3SimpleNativeOptions()); + if (options == null) + { + printHelp(); +@@ -230,7 +190,7 @@ public class SettingsMode implements Serializable + + public static void printHelp() + { +- GroupedOptions.printOptions(System.out, "-mode", new ThriftOptions(), new Cql3NativeOptions(), new Cql3SimpleNativeOptions()); ++ GroupedOptions.printOptions(System.out, "-mode", new Cql3NativeOptions(), new Cql3SimpleNativeOptions()); + } + + public static Runnable helpPrinter() +diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsNode.java b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsNode.java +index 89b7871..272bf58 100644 +--- a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsNode.java ++++ b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsNode.java +@@ -78,7 +78,6 @@ public class SettingsNode implements Serializable + Set r = new HashSet<>(); + switch (settings.mode.api) + { +- case THRIFT_SMART: + case JAVA_DRIVER_NATIVE: + if (!isWhiteList) + { +@@ -86,7 +85,6 @@ public class SettingsNode implements Serializable + r.add(host.getAddress().getHostName()); + break; + } +- case THRIFT: + case SIMPLE_NATIVE: + for (InetAddress address : resolveAllSpecified()) + r.add(address.getHostName()); +diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsPort.java b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsPort.java +index 88ff9a6..021178e 100644 +--- a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsPort.java ++++ b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsPort.java +@@ -30,13 +30,11 @@ public class SettingsPort implements Serializable + { + + public final int nativePort; +- public final int thriftPort; + public final int jmxPort; + + public SettingsPort(PortOptions options) + { + nativePort = Integer.parseInt(options.nativePort.value()); +- thriftPort = 
Integer.parseInt(options.thriftPort.value()); + jmxPort = Integer.parseInt(options.jmxPort.value()); + } + +@@ -45,13 +43,12 @@ public class SettingsPort implements Serializable + private static final class PortOptions extends GroupedOptions + { + final OptionSimple nativePort = new OptionSimple("native=", "[0-9]+", "9042", "Use this port for the Cassandra native protocol", false); +- final OptionSimple thriftPort = new OptionSimple("thrift=", "[0-9]+", "9160", "Use this port for the thrift protocol", false); + final OptionSimple jmxPort = new OptionSimple("jmx=", "[0-9]+", "7199", "Use this port for retrieving statistics over jmx", false); + + @Override + public List options() + { +- return Arrays.asList(nativePort, thriftPort, jmxPort); ++ return Arrays.asList(nativePort, jmxPort); + } + } + +diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsSchema.java b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsSchema.java +index 5c437a9..8c6a269 100644 +--- a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsSchema.java ++++ b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsSchema.java +@@ -27,8 +27,6 @@ import java.util.*; + + import com.datastax.driver.core.exceptions.AlreadyExistsException; + import org.apache.cassandra.stress.util.JavaDriverClient; +-import org.apache.cassandra.thrift.*; +-import org.apache.cassandra.thrift.ConsistencyLevel; + import org.apache.cassandra.utils.ByteBufferUtil; + + public class SettingsSchema implements Serializable +@@ -58,22 +56,10 @@ public class SettingsSchema implements Serializable + compactionStrategyOptions = options.compaction.getOptions(); + } + +- public void createKeySpaces(StressSettings settings) +- { +- if (settings.mode.api != ConnectionAPI.JAVA_DRIVER_NATIVE) +- { +- createKeySpacesThrift(settings); +- } +- else +- { +- createKeySpacesNative(settings); +- } +- } +- + /** + * Create Keyspace with Standard and Super/Counter column families + */ +- public 
void createKeySpacesNative(StressSettings settings) ++ public void createKeySpaces(StressSettings settings) + { + + JavaDriverClient client = settings.getJavaDriverClient(false); +@@ -214,75 +200,6 @@ public class SettingsSchema implements Serializable + return b.toString(); + } + +- /** +- * Create Keyspace with Standard and Super/Counter column families +- */ +- public void createKeySpacesThrift(StressSettings settings) +- { +- KsDef ksdef = new KsDef(); +- +- // column family for standard columns +- CfDef standardCfDef = new CfDef(keyspace, "standard1"); +- Map compressionOptions = new HashMap<>(); +- if (compression != null) +- compressionOptions.put("sstable_compression", compression); +- +- String comparator = settings.columns.comparator; +- standardCfDef.setComparator_type(comparator) +- .setDefault_validation_class(DEFAULT_VALIDATOR) +- .setCompression_options(compressionOptions); +- +- for (int i = 0; i < settings.columns.names.size(); i++) +- standardCfDef.addToColumn_metadata(new ColumnDef(settings.columns.names.get(i), "BytesType")); +- +- // column family for standard counters +- CfDef counterCfDef = new CfDef(keyspace, "counter1") +- .setComparator_type(comparator) +- .setDefault_validation_class("CounterColumnType") +- .setCompression_options(compressionOptions); +- +- ksdef.setName(keyspace); +- ksdef.setStrategy_class(replicationStrategy); +- +- if (!replicationStrategyOptions.isEmpty()) +- { +- ksdef.setStrategy_options(replicationStrategyOptions); +- } +- +- if (compactionStrategy != null) +- { +- standardCfDef.setCompaction_strategy(compactionStrategy); +- counterCfDef.setCompaction_strategy(compactionStrategy); +- if (!compactionStrategyOptions.isEmpty()) +- { +- standardCfDef.setCompaction_strategy_options(compactionStrategyOptions); +- counterCfDef.setCompaction_strategy_options(compactionStrategyOptions); +- } +- } +- +- ksdef.setCf_defs(new ArrayList<>(Arrays.asList(standardCfDef, counterCfDef))); +- +- Cassandra.Client client = 
settings.getRawThriftClient(false); +- +- try +- { +- client.system_add_keyspace(ksdef); +- client.set_keyspace(keyspace); +- +- System.out.println(String.format("Created keyspaces. Sleeping %ss for propagation.", settings.node.nodes.size())); +- Thread.sleep(settings.node.nodes.size() * 1000L); // seconds +- } +- catch (InvalidRequestException e) +- { +- System.err.println("Unable to create stress keyspace: " + e.getWhy()); +- } +- catch (Exception e) +- { +- System.err.println("!!!! " + e.getMessage()); +- } +- } +- +- + // Option Declarations + + private static final class Options extends GroupedOptions +diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsTransport.java b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsTransport.java +index b6d1d90..72ad646 100644 +--- a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsTransport.java ++++ b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsTransport.java +@@ -28,74 +28,14 @@ import java.util.List; + import java.util.Map; + + import org.apache.cassandra.config.EncryptionOptions; +-import org.apache.cassandra.thrift.ITransportFactory; +-import org.apache.cassandra.thrift.SSLTransportFactory; +-import org.apache.cassandra.thrift.TFramedTransportFactory; + + public class SettingsTransport implements Serializable + { +- +- private final String fqFactoryClass; + private final TOptions options; +- private ITransportFactory factory; + + public SettingsTransport(TOptions options) + { + this.options = options; +- this.fqFactoryClass = options.factory.value(); +- try +- { +- Class clazz = Class.forName(fqFactoryClass); +- if (!ITransportFactory.class.isAssignableFrom(clazz)) +- throw new IllegalArgumentException(clazz + " is not a valid transport factory"); +- // check we can instantiate it +- clazz.newInstance(); +- } +- catch (Exception e) +- { +- throw new IllegalArgumentException("Invalid transport factory class: " + options.factory.value(), e); +- } +- } +- 
+- private void configureTransportFactory(ITransportFactory transportFactory, TOptions options) +- { +- Map factoryOptions = new HashMap<>(); +- // If the supplied factory supports the same set of options as our SSL impl, set those +- if (transportFactory.supportedOptions().contains(SSLTransportFactory.TRUSTSTORE)) +- factoryOptions.put(SSLTransportFactory.TRUSTSTORE, options.trustStore.value()); +- if (transportFactory.supportedOptions().contains(SSLTransportFactory.TRUSTSTORE_PASSWORD)) +- factoryOptions.put(SSLTransportFactory.TRUSTSTORE_PASSWORD, options.trustStorePw.value()); +- if (transportFactory.supportedOptions().contains(SSLTransportFactory.KEYSTORE)) +- factoryOptions.put(SSLTransportFactory.KEYSTORE, options.keyStore.value()); +- if (transportFactory.supportedOptions().contains(SSLTransportFactory.KEYSTORE_PASSWORD)) +- factoryOptions.put(SSLTransportFactory.KEYSTORE_PASSWORD, options.keyStorePw.value()); +- if (transportFactory.supportedOptions().contains(SSLTransportFactory.PROTOCOL)) +- factoryOptions.put(SSLTransportFactory.PROTOCOL, options.protocol.value()); +- if (transportFactory.supportedOptions().contains(SSLTransportFactory.CIPHER_SUITES)) +- factoryOptions.put(SSLTransportFactory.CIPHER_SUITES, options.ciphers.value()); +- // Now check if any of the factory's supported options are set as system properties +- for (String optionKey : transportFactory.supportedOptions()) +- if (System.getProperty(optionKey) != null) +- factoryOptions.put(optionKey, System.getProperty(optionKey)); +- +- transportFactory.setOptions(factoryOptions); +- } +- +- public synchronized ITransportFactory getFactory() +- { +- if (factory == null) +- { +- try +- { +- this.factory = (ITransportFactory) Class.forName(fqFactoryClass).newInstance(); +- configureTransportFactory(this.factory, this.options); +- } +- catch (Exception e) +- { +- throw new RuntimeException(e); +- } +- } +- return factory; + } + + public EncryptionOptions.ClientEncryptionOptions 
getEncryptionOptions() +@@ -127,20 +67,18 @@ public class SettingsTransport implements Serializable + + static class TOptions extends GroupedOptions implements Serializable + { +- final OptionSimple factory = new OptionSimple("factory=", ".*", TFramedTransportFactory.class.getName(), "Fully-qualified ITransportFactory class name for creating a connection. Note: For Thrift over SSL, use org.apache.cassandra.thrift.SSLTransportFactory.", false); + final OptionSimple trustStore = new OptionSimple("truststore=", ".*", null, "SSL: full path to truststore", false); + final OptionSimple trustStorePw = new OptionSimple("truststore-password=", ".*", null, "SSL: truststore password", false); + final OptionSimple keyStore = new OptionSimple("keystore=", ".*", null, "SSL: full path to keystore", false); + final OptionSimple keyStorePw = new OptionSimple("keystore-password=", ".*", null, "SSL: keystore password", false); + final OptionSimple protocol = new OptionSimple("ssl-protocol=", ".*", "TLS", "SSL: connection protocol to use", false); + final OptionSimple alg = new OptionSimple("ssl-alg=", ".*", "SunX509", "SSL: algorithm", false); +- final OptionSimple storeType = new OptionSimple("store-type=", ".*", "JKS", "SSL: keystore format", false); + final OptionSimple ciphers = new OptionSimple("ssl-ciphers=", ".*", "TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA", "SSL: comma delimited list of encryption suites to use", false); + + @Override + public List options() + { +- return Arrays.asList(factory, trustStore, trustStorePw, keyStore, keyStorePw, protocol, alg, storeType, ciphers); ++ return Arrays.asList(trustStore, trustStorePw, keyStore, keyStorePw, protocol, alg, ciphers); + } + } + +diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/StressSettings.java b/tools/stress/src/org/apache/cassandra/stress/settings/StressSettings.java +index de03737..e3a8d88 100644 +--- a/tools/stress/src/org/apache/cassandra/stress/settings/StressSettings.java ++++ 
b/tools/stress/src/org/apache/cassandra/stress/settings/StressSettings.java +@@ -28,15 +28,7 @@ import com.datastax.driver.core.Metadata; + import com.google.common.collect.ImmutableMap; + import org.apache.cassandra.config.EncryptionOptions; + import org.apache.cassandra.stress.util.JavaDriverClient; +-import org.apache.cassandra.stress.util.SimpleThriftClient; +-import org.apache.cassandra.stress.util.SmartThriftClient; +-import org.apache.cassandra.stress.util.ThriftClient; +-import org.apache.cassandra.thrift.AuthenticationRequest; +-import org.apache.cassandra.thrift.Cassandra; +-import org.apache.cassandra.thrift.InvalidRequestException; + import org.apache.cassandra.transport.SimpleClient; +-import org.apache.thrift.protocol.TBinaryProtocol; +-import org.apache.thrift.transport.TTransport; + + public class StressSettings implements Serializable + { +@@ -89,81 +81,6 @@ public class StressSettings implements Serializable + this.tokenRange = tokenRange; + } + +- private SmartThriftClient tclient; +- +- /** +- * Thrift client connection +- * @return cassandra client connection +- */ +- public synchronized ThriftClient getThriftClient() +- { +- if (mode.api != ConnectionAPI.THRIFT_SMART) +- return getSimpleThriftClient(); +- +- if (tclient == null) +- tclient = getSmartThriftClient(); +- +- return tclient; +- } +- +- private SmartThriftClient getSmartThriftClient() +- { +- Metadata metadata = getJavaDriverClient().getCluster().getMetadata(); +- return new SmartThriftClient(this, schema.keyspace, metadata); +- } +- +- /** +- * Thrift client connection +- * @return cassandra client connection +- */ +- private SimpleThriftClient getSimpleThriftClient() +- { +- return new SimpleThriftClient(getRawThriftClient(node.randomNode(), true)); +- } +- +- public Cassandra.Client getRawThriftClient(boolean setKeyspace) +- { +- return getRawThriftClient(node.randomNode(), setKeyspace); +- } +- +- public Cassandra.Client getRawThriftClient(String host) +- { +- return 
getRawThriftClient(host, true); +- } +- +- public Cassandra.Client getRawThriftClient(String host, boolean setKeyspace) +- { +- Cassandra.Client client; +- +- try +- { +- TTransport transport = this.transport.getFactory().openTransport(host, port.thriftPort); +- +- client = new Cassandra.Client(new TBinaryProtocol(transport)); +- +- if (mode.cqlVersion.isCql()) +- client.set_cql_version(mode.cqlVersion.connectVersion); +- +- if (setKeyspace) +- client.set_keyspace(schema.keyspace); +- +- if (mode.username != null) +- client.login(new AuthenticationRequest(ImmutableMap.of("username", mode.username, "password", mode.password))); +- +- } +- catch (InvalidRequestException e) +- { +- throw new RuntimeException(e.getWhy()); +- } +- catch (Exception e) +- { +- throw new RuntimeException(e); +- } +- +- return client; +- } +- +- + public SimpleClient getSimpleNativeClient() + { + try +diff --git a/tools/stress/src/org/apache/cassandra/stress/util/SimpleThriftClient.java b/tools/stress/src/org/apache/cassandra/stress/util/SimpleThriftClient.java +deleted file mode 100644 +index bb5f4c0..0000000 +--- a/tools/stress/src/org/apache/cassandra/stress/util/SimpleThriftClient.java ++++ /dev/null +@@ -1,111 +0,0 @@ +-package org.apache.cassandra.stress.util; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. 
See the License for the +- * specific language governing permissions and limitations +- * under the License. +- * +- */ +- +- +-import java.nio.ByteBuffer; +-import java.util.List; +-import java.util.Map; +- +-import org.apache.cassandra.thrift.*; +-import org.apache.cassandra.utils.ByteBufferUtil; +-import org.apache.thrift.TException; +- +-public class SimpleThriftClient implements ThriftClient +-{ +- +- final Cassandra.Client client; +- public SimpleThriftClient(Cassandra.Client client) +- { +- this.client = client; +- } +- +- public void batch_mutate(Map>> record, ConsistencyLevel consistencyLevel) throws TException +- { +- client.batch_mutate(record, consistencyLevel); +- } +- +- @Override +- public List get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level) throws TException +- { +- return client.get_slice(key, column_parent, predicate, consistency_level); +- } +- +- @Override +- public List get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws TException +- { +- return client.get_indexed_slices(column_parent, index_clause, column_predicate, consistency_level); +- } +- +- @Override +- public List get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level) throws TException +- { +- return client.get_range_slices(column_parent, predicate, range, consistency_level); +- } +- +- @Override +- public Map> multiget_slice(List keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level) throws TException +- { +- return client.multiget_slice(keys, column_parent, predicate, consistency_level); +- } +- +- @Override +- public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level) throws TException +- { +- client.insert(key, column_parent, column, consistency_level); +- } 
+- +- @Override +- public Integer prepare_cql3_query(String query, Compression compression) throws TException +- { +- return client.prepare_cql3_query(ByteBufferUtil.bytes(query), compression).itemId; +- } +- +- @Override +- public CqlResult execute_prepared_cql_query(int itemId, ByteBuffer key, List values) throws TException +- { +- return client.execute_prepared_cql_query(itemId, values); +- } +- +- @Override +- public Integer prepare_cql_query(String query, Compression compression) throws InvalidRequestException, TException +- { +- return client.prepare_cql_query(ByteBufferUtil.bytes(query), compression).itemId; +- } +- +- @Override +- public CqlResult execute_cql3_query(String query, ByteBuffer key, Compression compression, ConsistencyLevel consistency) throws TException +- { +- return client.execute_cql3_query(ByteBufferUtil.bytes(query), compression, consistency); +- } +- +- @Override +- public CqlResult execute_prepared_cql3_query(int itemId, ByteBuffer key, List values, ConsistencyLevel consistency) throws TException +- { +- return client.execute_prepared_cql3_query(itemId, values, consistency); +- } +- +- @Override +- public CqlResult execute_cql_query(String query, ByteBuffer key, Compression compression) throws TException +- { +- return client.execute_cql_query(ByteBufferUtil.bytes(query), compression); +- } +-} +diff --git a/tools/stress/src/org/apache/cassandra/stress/util/SmartThriftClient.java b/tools/stress/src/org/apache/cassandra/stress/util/SmartThriftClient.java +deleted file mode 100644 +index babbd7a..0000000 +--- a/tools/stress/src/org/apache/cassandra/stress/util/SmartThriftClient.java ++++ /dev/null +@@ -1,282 +0,0 @@ +-package org.apache.cassandra.stress.util; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. 
The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. +- * +- */ +- +- +-import java.net.InetAddress; +-import java.nio.ByteBuffer; +-import java.util.*; +-import java.util.concurrent.ConcurrentHashMap; +-import java.util.concurrent.ConcurrentLinkedQueue; +-import java.util.concurrent.ThreadLocalRandom; +-import java.util.concurrent.atomic.AtomicInteger; +- +-import com.google.common.collect.Iterators; +- +-import com.datastax.driver.core.Host; +-import com.datastax.driver.core.Metadata; +-import org.apache.cassandra.stress.settings.StressSettings; +-import org.apache.cassandra.thrift.*; +-import org.apache.cassandra.utils.ByteBufferUtil; +-import org.apache.thrift.TException; +- +-public class SmartThriftClient implements ThriftClient +-{ +- +- final String keyspace; +- final Metadata metadata; +- final StressSettings settings; +- final ConcurrentHashMap> cache = new ConcurrentHashMap<>(); +- +- final AtomicInteger queryIdCounter = new AtomicInteger(); +- final ConcurrentHashMap queryStrings = new ConcurrentHashMap<>(); +- final ConcurrentHashMap queryIds = new ConcurrentHashMap<>(); +- final Set whiteset; +- final List whitelist; +- +- public SmartThriftClient(StressSettings settings, String keyspace, Metadata metadata) +- { +- this.metadata = metadata; +- this.keyspace = keyspace; +- this.settings = settings; +- if (!settings.node.isWhiteList) +- { +- whiteset = null; +- whitelist = null; +- } +- else +- { +- whiteset = 
settings.node.resolveAllSpecified(); +- whitelist = Arrays.asList(whiteset.toArray(new InetAddress[0])); +- } +- } +- +- private final AtomicInteger roundrobin = new AtomicInteger(); +- +- private Integer getId(String query) +- { +- Integer r; +- if ((r = queryIds.get(query)) != null) +- return r; +- r = queryIdCounter.incrementAndGet(); +- if (queryIds.putIfAbsent(query, r) == null) +- { +- queryStrings.put(r, query); +- return r; +- } +- return queryIds.get(query); +- } +- +- final class Client +- { +- final Cassandra.Client client; +- final InetAddress server; +- final Map queryMap = new HashMap<>(); +- +- Client(Cassandra.Client client, InetAddress server) +- { +- this.client = client; +- this.server = server; +- } +- +- Integer get(Integer id, boolean cql3) throws TException +- { +- Integer serverId = queryMap.get(id); +- if (serverId != null) +- return serverId; +- prepare(id, cql3); +- return queryMap.get(id); +- } +- +- void prepare(Integer id, boolean cql3) throws TException +- { +- String query; +- while ( null == (query = queryStrings.get(id)) ) ; +- if (cql3) +- { +- Integer serverId = client.prepare_cql3_query(ByteBufferUtil.bytes(query), Compression.NONE).itemId; +- queryMap.put(id, serverId); +- } +- else +- { +- Integer serverId = client.prepare_cql_query(ByteBufferUtil.bytes(query), Compression.NONE).itemId; +- queryMap.put(id, serverId); +- } +- } +- } +- +- private Client get(ByteBuffer pk) +- { +- Set hosts = metadata.getReplicas(metadata.quote(keyspace), pk); +- InetAddress address = null; +- if (hosts.size() > 0) +- { +- int pos = roundrobin.incrementAndGet() % hosts.size(); +- for (int i = 0 ; address == null && i < hosts.size() ; i++) +- { +- if (pos < 0) +- pos = -pos; +- Host host = Iterators.get(hosts.iterator(), (pos + i) % hosts.size()); +- if (whiteset == null || whiteset.contains(host.getAddress())) +- address = host.getAddress(); +- } +- } +- if (address == null) +- address = 
whitelist.get(ThreadLocalRandom.current().nextInt(whitelist.size())); +- ConcurrentLinkedQueue q = cache.get(address); +- if (q == null) +- { +- ConcurrentLinkedQueue newQ = new ConcurrentLinkedQueue(); +- q = cache.putIfAbsent(address, newQ); +- if (q == null) +- q = newQ; +- } +- Client tclient = q.poll(); +- if (tclient != null) +- return tclient; +- return new Client(settings.getRawThriftClient(address.getHostAddress()), address); +- } +- +- @Override +- public void batch_mutate(Map>> record, ConsistencyLevel consistencyLevel) throws TException +- { +- for (Map.Entry>> e : record.entrySet()) +- { +- Client client = get(e.getKey()); +- try +- { +- client.client.batch_mutate(Collections.singletonMap(e.getKey(), e.getValue()), consistencyLevel); +- } finally +- { +- cache.get(client.server).add(client); +- } +- } +- } +- +- @Override +- public List get_slice(ByteBuffer key, ColumnParent parent, SlicePredicate predicate, ConsistencyLevel consistencyLevel) throws TException +- { +- Client client = get(key); +- try +- { +- return client.client.get_slice(key, parent, predicate, consistencyLevel); +- } finally +- { +- cache.get(client.server).add(client); +- } +- } +- +- @Override +- public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level) throws TException +- { +- Client client = get(key); +- try +- { +- client.client.insert(key, column_parent, column, consistency_level); +- } finally +- { +- cache.get(client.server).add(client); +- } +- } +- +- @Override +- public CqlResult execute_cql_query(String query, ByteBuffer key, Compression compression) throws TException +- { +- Client client = get(key); +- try +- { +- return client.client.execute_cql_query(ByteBufferUtil.bytes(query), compression); +- } finally +- { +- cache.get(client.server).add(client); +- } +- } +- +- @Override +- public CqlResult execute_cql3_query(String query, ByteBuffer key, Compression compression, ConsistencyLevel consistency) throws 
TException +- { +- Client client = get(key); +- try +- { +- return client.client.execute_cql3_query(ByteBufferUtil.bytes(query), compression, consistency); +- } finally +- { +- cache.get(client.server).add(client); +- } +- } +- +- @Override +- public Integer prepare_cql3_query(String query, Compression compression) throws TException +- { +- return getId(query); +- } +- +- @Override +- public CqlResult execute_prepared_cql3_query(int queryId, ByteBuffer key, List values, ConsistencyLevel consistency) throws TException +- { +- Client client = get(key); +- try +- { +- return client.client.execute_prepared_cql3_query(client.get(queryId, true), values, consistency); +- } finally +- { +- cache.get(client.server).add(client); +- } +- } +- +- @Override +- public Integer prepare_cql_query(String query, Compression compression) throws TException +- { +- return getId(query); +- } +- +- @Override +- public CqlResult execute_prepared_cql_query(int queryId, ByteBuffer key, List values) throws TException +- { +- Client client = get(key); +- try +- { +- return client.client.execute_prepared_cql_query(client.get(queryId, true), values); +- } finally +- { +- cache.get(client.server).add(client); +- } +- } +- +- @Override +- public Map> multiget_slice(List keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level) throws TException +- { +- throw new UnsupportedOperationException(); +- } +- +- @Override +- public List get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level) throws TException +- { +- throw new UnsupportedOperationException(); +- } +- +- @Override +- public List get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws TException +- { +- throw new UnsupportedOperationException(); +- } +- +-} +diff --git a/tools/stress/src/org/apache/cassandra/stress/util/ThriftClient.java 
b/tools/stress/src/org/apache/cassandra/stress/util/ThriftClient.java +deleted file mode 100644 +index 3b13758..0000000 +--- a/tools/stress/src/org/apache/cassandra/stress/util/ThriftClient.java ++++ /dev/null +@@ -1,57 +0,0 @@ +-package org.apache.cassandra.stress.util; +-/* +- * +- * Licensed to the Apache Software Foundation (ASF) under one +- * or more contributor license agreements. See the NOTICE file +- * distributed with this work for additional information +- * regarding copyright ownership. The ASF licenses this file +- * to you under the Apache License, Version 2.0 (the +- * "License"); you may not use this file except in compliance +- * with the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, +- * software distributed under the License is distributed on an +- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +- * KIND, either express or implied. See the License for the +- * specific language governing permissions and limitations +- * under the License. 
+- * +- */ +- +- +-import java.nio.ByteBuffer; +-import java.util.List; +-import java.util.Map; +- +-import org.apache.cassandra.thrift.*; +-import org.apache.thrift.TException; +- +-public interface ThriftClient +-{ +- +- public void batch_mutate(Map>> record, ConsistencyLevel consistencyLevel) throws TException; +- +- List get_slice(ByteBuffer key, ColumnParent parent, SlicePredicate predicate, ConsistencyLevel consistencyLevel) throws InvalidRequestException, UnavailableException, TimedOutException, TException; +- +- void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException; +- +- Map> multiget_slice(List keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException; +- +- List get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException; +- +- List get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException; +- +- Integer prepare_cql3_query(String query, Compression compression) throws InvalidRequestException, TException; +- +- CqlResult execute_prepared_cql3_query(int itemId, ByteBuffer key, List values, ConsistencyLevel consistency) throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException; +- +- CqlResult execute_cql_query(String query, ByteBuffer key, Compression compression) throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException; +- +- CqlResult execute_cql3_query(String query, ByteBuffer key, 
Compression compression, ConsistencyLevel consistency) throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException; +- +- Integer prepare_cql_query(String query, Compression compression) throws InvalidRequestException, TException; +- +- CqlResult execute_prepared_cql_query(int itemId, ByteBuffer key, List values) throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException; +-} +-- +2.9.3 + diff --git a/cassandra-3.9-scripts.patch b/cassandra-3.9-scripts.patch index b1d58a3..cb505bb 100644 --- a/cassandra-3.9-scripts.patch +++ b/cassandra-3.9-scripts.patch @@ -108,7 +108,7 @@ diff -urN cassandra-3.9/bin/cassandra.in.sh cassandra-3.9new/bin/cassandra.in.sh -for jar in "$CASSANDRA_HOME"/lib/*.jar; do - CLASSPATH="$CLASSPATH:$jar" -done -+DEPS=( cassandra/cassandra-all metrics slf4j/api guava snakeyaml commons-codec jackson/jackson-mapper-asl jackson/jackson-core-asl netty/netty-all lz4 logback/logback-classic logback/logback-core jna sigar high-scale-lib/high-scale-lib antlr3-runtime concurrentlinkedhashmap-lru commons-lang3 json_simple stream-lib caffeine cassandra/cassandra-thrift libthrift snappy-java jBCrypt jctools ) ++DEPS=( cassandra/cassandra-all metrics slf4j/api guava snakeyaml commons-codec jackson/jackson-mapper-asl jackson/jackson-core-asl netty/netty-all lz4 logback/logback-classic logback/logback-core jna sigar high-scale-lib/high-scale-lib antlr3-runtime concurrentlinkedhashmap-lru commons-lang3 json_simple stream-lib caffeine snappy-java jBCrypt jctools ) -# JSR223 - collect all JSR223 engines' jars -for jsr223jar in "$CASSANDRA_HOME"/lib/jsr223/*/*.jar; do diff --git a/cassandra.spec b/cassandra.spec index 259b65c..fa27433 100644 --- a/cassandra.spec +++ b/cassandra.spec @@ -4,14 +4,13 @@ # fedora reserved UID and GID for cassandra %global gid_uid 143 -%{!?thrift:%global thrift 0} %{!?stress:%global stress 0} %global cqlsh_version 5.0.1 Name: 
%{?scl_prefix}cassandra Version: 3.9 -Release: 6%{?dist} +Release: 7%{?dist} Summary: Client utilities for %{pkg_name} # Apache (v2.0) BSD (3 clause): # ./src/java/org/apache/cassandra/utils/vint/VIntCoding.java @@ -24,7 +23,6 @@ Source3: %{pkg_name}-tmpfile # pom files are not generated but used are the ones from mavencentral # because of orphaned maven-ant-task package doing the work in this case Source4: http://central.maven.org/maven2/org/apache/%{pkg_name}/%{pkg_name}-all/%{version}/%{pkg_name}-all-%{version}.pom -Source5: http://central.maven.org/maven2/org/apache/%{pkg_name}/%{pkg_name}-thrift/%{version}/%{pkg_name}-thrift-%{version}.pom Source6: http://central.maven.org/maven2/org/apache/%{pkg_name}/%{pkg_name}-clientutil/%{version}/%{pkg_name}-clientutil-%{version}.pom Source7: http://central.maven.org/maven2/org/apache/%{pkg_name}/%{pkg_name}-parent/%{version}/%{pkg_name}-parent-%{version}.pom @@ -39,15 +37,15 @@ Patch2: %{pkg_name}-%{version}-scripts.patch # remove "Open" infix from all hppc classes # https://issues.apache.org/jira/browse/CASSANDRA-12995X Patch3: %{pkg_name}-%{version}-hppc.patch -# changes autoclosable issue with TTransport in thrift -# https://bugzilla.redhat.com/show_bug.cgi?id=1183877 -Patch4: %{pkg_name}-%{version}-thrift.patch # add two more parameters for SubstituteLogger constructor in slf4j # https://issues.apache.org/jira/browse/CASSANDRA-12996 Patch5: %{pkg_name}-%{version}-slf4j.patch # remove net.mintern:primitive as it will be removed in next upstream release # https://github.com/apache/cassandra/commit/8f0d5a295d34972ef719574df4aa1b59bf9e8478 Patch6: %{pkg_name}-%{version}-remove-primitive.patch +# remove thrift as it will be removed in next upstream major release +# https://github.com/apache/cassandra/commit/4881d9c308ccd6b5ca70925bf6ebedb70e7705fc +Patch7: %{pkg_name}-%{version}-remove-thrift.patch # TODO #BuildArchitectures: noarch @@ -80,7 +78,6 @@ BuildRequires: %{?scl_prefix}concurrent-trees BuildRequires: 
%{?scl_prefix}logback BuildRequires: %{?scl_prefix}metrics-reporter-config BuildRequires: %{?scl_prefix}compress-lzf -BuildRequires: %{?scl_prefix}disruptor-thrift-server BuildRequires: %{?scl_prefix}airline BuildRequires: %{?scl_prefix}jmh BuildRequires: %{?scl_prefix}byteman @@ -90,6 +87,8 @@ BuildRequires: %{?scl_prefix}jackson BuildRequires: %{?scl_prefix}antlr3-tool BuildRequires: %{?scl_prefix}caffeine BuildRequires: %{?scl_prefix}hppc +BuildRequires: %{?scl_prefix}lz4-java +BuildRequires: %{?scl_prefix}snappy-java # using high-scale-lib from stephenc, no Cassandra original #BuildRequires: mvn(com.boundary:high-scale-lib) BuildRequires: %{?scl_prefix}high-scale-lib @@ -108,12 +107,8 @@ BuildRequires: %{?scl_prefix_java_common}jcl-over-slf4j BuildRequires: %{?scl_prefix_java_common}ant-junit # in rh-java-common: 4.0.28, needed: 4.0.39.Final BuildRequires: %{?scl_prefix_java_common}netty -# in cassandra39: 0.9.1, needed: 0.9.2 -BuildRequires: %{?scl_prefix}libthrift-java # TODO BuildRequires: %{?scl_prefix}cassandra-java-driver -BuildRequires: %{?scl_prefix}lz4-java -BuildRequires: %{?scl_prefix}snappy-java BuildRequires: %{?scl_prefix}ohc BuildRequires: %{?scl_prefix}ohc-core-j8 # the SCL version of the package depends on rh-maven33 collection @@ -157,17 +152,6 @@ Summary: Parent POM for %{pkg_name} %description parent Parent POM for %{pkg_name}. -%if %thrift -%package thrift -Summary: Thrift for %{pkg_name} -Requires: %{pkg_name} = %{version}-%{release} - -%description thrift -Allows portable (across programming languages) access to the database. Thrift -accomplishes this by generated source code for the programming language in -question based on a Thrift IDL file describing the service. -%endif - # source codes of cqlshlib are not python3 compatible, therefore using python2 %package python2-cqlshlib Summary: Python cqlsh library for %{pkg_name} @@ -201,6 +185,9 @@ This package contains the API documentation for %{pkg_name}. 
cp -pr %{pkg_name}-%{pkg_name}-%{version}/* . rm -r %{pkg_name}-%{pkg_name}-%{version} +# remove thrift patch +%patch7 -p1 + # remove binary and library files find -name "*.class" -print -delete find -name "*.jar" -print -delete @@ -221,40 +208,14 @@ find -name "*py.class" -print -delete # copy pom files mkdir build cp -p %{SOURCE4} build/%{pkg_name}-%{version}.pom -cp -p %{SOURCE5} build/%{pkg_name}-thrift-%{version}.pom cp -p %{SOURCE6} build/%{pkg_name}-clientutil-%{version}.pom cp -p %{SOURCE7} build/%{pkg_name}-%{version}-parent.pom -# remove hadoop -rm src/java/org/apache/cassandra/client/RingCache.java -rm -r src/java/org/apache/cassandra/hadoop -rm test/unit/org/apache/cassandra/client/TestRingCache.java -rm test/unit/org/apache/cassandra/hadoop/ColumnFamilyInputFormatTest.java -# remove hadoop also from pom files -%pom_remove_dep -r org.apache.hadoop: build/%{pkg_name}-%{version}.pom - -# remove shaded classifier in cassandra driver from pom files -%pom_xpath_remove "pom:dependencies/pom:dependency/pom:classifier" build/%{pkg_name}-%{version}.pom - -# TRY remove cassandra-java-driver -#%%pom_remove_dep -r com.datastax.cassandra:cassandra-driver-core build/%%{pkg_name}-%%{version}.pom -#rm src/java/org/apache/cassandra/cql3/functions/UDFunction.java -#rm src/java/org/apache/cassandra/cql3/functions/UDFContext.java -#rm src/java/org/apache/cassandra/cql3/functions/JavaBasedUDFunction.java -#rm src/java/org/apache/cassandra/cql3/functions/JavaUDF.java -#rm src/java/org/apache/cassandra/cql3/functions/UDFContextImpl.java -#rm src/java/org/apache/cassandra/cql3/functions/UDHelper.java -#rm src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java -#rm src/java/org/apache/cassandra/tools/BulkLoader.java -#rm src/java/org/apache/cassandra/tools/LoaderOptions.java -#rm src/java/org/apache/cassandra/utils/NativeSSTableLoaderClient.java - # build jar repositories for dependencies build-jar-repository lib antlr3 build-jar-repository lib stringtemplate4 
build-jar-repository lib jsr-305 build-jar-repository lib commons-lang3 -build-jar-repository lib libthrift build-jar-repository lib slf4j/api build-jar-repository lib guava build-jar-repository lib jamm @@ -292,7 +253,6 @@ build-jar-repository lib metrics-reporter-config/reporter-config build-jar-repository lib metrics-reporter-config/reporter-config-base build-jar-repository lib joda-time build-jar-repository lib compress-lzf -build-jar-repository lib disruptor-thrift-server build-jar-repository lib commons-cli build-jar-repository lib airline build-jar-repository lib jna @@ -324,25 +284,29 @@ build-jar-repository lib javax.inject %patch2 -p1 # hppc patch %patch3 -p1 -# thrift patch -%patch4 -p1 # slf4j patch %patch5 -p1 # remove primitive patch %patch6 -p1 %{?scl:scl enable %{scl_maven} %{scl} - << "EOF"} +# remove hadoop +rm -r src/java/org/apache/cassandra/hadoop +# remove hadoop also from pom files +%pom_remove_dep -r org.apache.hadoop: build/%{pkg_name}-%{version}.pom + +# remove shaded classifier in cassandra driver from pom files +%pom_xpath_remove "pom:dependencies/pom:dependency/pom:classifier" build/%{pkg_name}-%{version}.pom + # update dependencies in the downloaded pom files to those being actually used %pom_change_dep com.boundary: com.github.stephenc.high-scale-lib: build/%{pkg_name}-%{version}.pom -%pom_change_dep com.github.rholder:snowball-stemmer org.tartarus:snowball build/%{pkg_name}-thrift-%{version}.pom -# remove primitve as a dependency -%pom_remove_dep -r :primitive build/%{pkg_name}-thrift-%{version}.pom +# remove thrift dependencies from the downloaded pom files +%pom_remove_dep -r com.thinkaurelius.thrift:thrift-server build/%{pkg_name}-%{version}.pom +%pom_remove_dep -r org.apache.cassandra:cassandra-thrift build/%{pkg_name}-%{version}.pom +%pom_remove_dep -r org.apache.thrift:libthrift build/%{pkg_name}-%{version}.pom %mvn_package "org.apache.%{pkg_name}:%{pkg_name}-parent:pom:%{version}" parent -%if %thrift -%mvn_package 
":%{pkg_name}-thrift" thrift -%endif %mvn_package ":%{pkg_name}-clientutil" client %if %stress %mvn_package ":%{pkg_name}-stress" stress @@ -363,7 +327,6 @@ popd %{?scl:scl enable %{scl_maven} %{scl} - << "EOF"} %mvn_artifact build/%{pkg_name}-%{version}-parent.pom %mvn_artifact build/%{pkg_name}-%{version}.pom build/%{pkg_name}-%{version}.jar -%mvn_artifact build/%{pkg_name}-thrift-%{version}.pom build/%{pkg_name}-thrift-%{version}.jar %mvn_artifact build/%{pkg_name}-clientutil-%{version}.pom build/%{pkg_name}-clientutil-%{version}.jar %if %stress %mvn_artifact org.apache.%{pkg_name}:%{pkg_name}-stress:%{version} build/tools/lib/%{pkg_name}-stress.jar @@ -482,11 +445,6 @@ exit 0 %files parent -f .mfiles-parent %license LICENSE.txt NOTICE.txt -%if %thrift -%files thrift -f .mfiles-thrift -%license LICENSE.txt NOTICE.txt -%endif - %files python2-cqlshlib %license LICENSE.txt NOTICE.txt %{python2_sitearch}/cqlshlib @@ -504,6 +462,9 @@ exit 0 %license LICENSE.txt NOTICE.txt %changelog +* Tue Mar 28 2017 Tomas Repik - 3.9-7 +- remove thrift from 3.9 applying mainly upstream patch + * Mon Mar 20 2017 Tomas Repik - 3.9-6 - require airline and change permissions for config files