Java Code Examples for org.apache.cassandra.db.AbstractReadCommandBuilder

The following code examples demonstrate how to use org.apache.cassandra.db.AbstractReadCommandBuilder from Cassandra. They are extracted from various highly rated open source projects. You can use these snippets directly or view the entire linked source files. The snippets provide contextual information about how this class is used in real-world code, and also illustrate good practices for working with org.apache.cassandra.db.AbstractReadCommandBuilder.
Example 1
Project : cassandra Source File : ThrottledUnfilteredIteratorTest.java View Source Code on GitHub

    /**
     * Exercises ThrottledUnfilteredIterator.throttle() over a single partition that contains two
     * overlapping range tombstones — one flushed to an sstable, one still in the memtable — for
     * every batch size from 2 to 40. Verifies the batch-boundary invariants of the throttled
     * iterator and that re-merging all emitted batches preserves row liveness.
     */
    @Test
    public void testThrottledIteratorWithRangeDeletions() throws Exception
    {
        Keyspace keyspace = Keyspace.open(KSNAME);
        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CFNAME);

        String key = "k1";

        UpdateBuilder builder;

        // Even clusterings 0, 2, ..., 38 written at timestamp 0.
        builder = UpdateBuilder.create(cfs.metadata(), key).withTimestamp(0);
        for (int i = 0; i < 40; i += 2)
            builder.newRow(i).add("val", i);
        builder.applyUnsafe();

        // Range tombstone over [10, 22] at timestamp 1: shadows the even rows in that range.
        new RowUpdateBuilder(cfs.metadata(), 1, key).addRangeTombstone(10, 22).build().applyUnsafe();

        // Flush so everything above lives in an sstable; the writes below stay in the memtable,
        // forcing the read to merge two sources.
        cfs.forceBlockingFlush();

        // Odd clusterings 1, 3, ..., 39 written at timestamp 2 (newer than the first tombstone,
        // so odd rows inside [10, 22] survive it).
        builder = UpdateBuilder.create(cfs.metadata(), key).withTimestamp(2);
        for (int i = 1; i < 40; i += 2)
            builder.newRow(i).add("val", i);
        builder.applyUnsafe();

        // Second range tombstone over [19, 27] at timestamp 3: shadows rows from both writes.
        new RowUpdateBuilder(cfs.metadata(), 3, key).addRangeTombstone(19, 27).build().applyUnsafe();

        // Sampled expectations: rows outside both tombstones (or newer than the covering
        // tombstone) are live; rows covered by a newer tombstone are dead.
        int[] live = new int[]{ 4, 9, 11, 17, 28 };
        int[] dead = new int[]{ 12, 19, 21, 24, 27 };

        // Full-range read over the table.
        AbstractReadCommandBuilder.PartitionRangeBuilder cmdBuilder = Util.cmd(cfs);

        ReadCommand cmd = cmdBuilder.build();

        for (int batchSize = 2; batchSize <= 40; batchSize++)
        {
            // Materialized copy of every batch, so the merged result can be checked after the
            // source iterators are closed.
            List<UnfilteredRowIterator> unfilteredRowIterators = new LinkedList<>();

            try (ReadExecutionController executionController = cmd.executionController();
                 UnfilteredPartitionIterator iterator = cmd.executeLocally(executionController))
            {
                assertTrue(iterator.hasNext());
                Iterator<UnfilteredRowIterator> throttled = ThrottledUnfilteredIterator.throttle(iterator, batchSize);
                while (throttled.hasNext())
                {
                    UnfilteredRowIterator next = throttled.next();
                    ImmutableBTreePartition materializedPartition = ImmutableBTreePartition.create(next);
                    int unfilteredCount = Iterators.size(materializedPartition.unfilteredIterator());

                    // Debug output only; not part of the assertions.
                    System.out.println("batchsize " + batchSize + " unfilteredCount " + unfilteredCount + " materializedPartition " + materializedPartition);

                    if (throttled.hasNext())
                    {
                        // A non-final batch may exceed batchSize by exactly one unfiltered: the
                        // extra element must be a non-boundary close marker appended so the batch
                        // ends with any open range tombstone properly closed.
                        if (unfilteredCount != batchSize)
                        {
                            assertEquals(batchSize + 1, unfilteredCount);
                            Unfiltered last = Iterators.getLast(materializedPartition.unfilteredIterator());
                            assertTrue(last.isRangeTombstoneMarker());
                            RangeTombstoneMarker marker = (RangeTombstoneMarker) last;
                            assertFalse(marker.isBoundary());
                            assertTrue(marker.isClose(false));
                        }
                    }
                    else
                    {
                        // The final batch only has to fit within batchSize plus one close marker.
                        assertTrue(unfilteredCount <= batchSize + 1);
                    }
                    unfilteredRowIterators.add(materializedPartition.unfilteredIterator());
                }
                assertFalse(iterator.hasNext());
            }

            // Re-merge all batches: the result must be equivalent to the unthrottled partition.
            Partition partition = ImmutableBTreePartition.create(UnfilteredRowIterators.merge(unfilteredRowIterators));

            int nowInSec = FBUtilities.nowInSeconds();

            for (int i : live)
                assertTrue("Row " + i + " should be live", partition.getRow(Clustering.make(ByteBufferUtil.bytes((i)))).hasLiveData(nowInSec, cfs.metadata().enforceStrictLiveness()));
            for (int i : dead)
                assertFalse("Row " + i + " shouldn't be live", partition.getRow(Clustering.make(ByteBufferUtil.bytes((i)))).hasLiveData(nowInSec, cfs.metadata().enforceStrictLiveness()));
        }
    }
			
Example 2
Project : cassandra Source File : SanityCheckUtils.java View Source Code on GitHub

    
    /**
     * Scans the table's partitions starting from either a random token or the minimum token,
     * comparing each partition read from Cassandra storage against the same partition read
     * through the pluggable storage engine ({@code cfs.engine}), and accumulates any
     * differences into a sanity-check report.
     *
     * @param cfs              the column family store to scan
     * @param randomStartToken start from a random token when true, else from the minimum token
     * @param limit            stop after this many partitions; {@code <= 0} means no limit
     * @param verbose          passed through to {@link RowIteratorSanityCheck} for extra output
     * @return the accumulated comparison report
     */
    public static RowIteratorSanityCheck.Report checkSanity(ColumnFamilyStore cfs, boolean randomStartToken, long limit, boolean verbose)
    {
        int nowInSecond = FBUtilities.nowInSeconds();
        Token fromToken = randomStartToken ? cfs.metadata.partitioner.getRandomToken() : cfs.metadata.partitioner.getMinimumToken();
        InternalPartitionRangeReadCommand command = new InternalPartitionRangeReadCommand(
            (PartitionRangeReadCommand) (new AbstractReadCommandBuilder.PartitionRangeBuilder(cfs).fromToken(fromToken, true))
                                        .withNowInSeconds(nowInSecond).build());
        RowIteratorSanityCheck check = new RowIteratorSanityCheck(cfs.metadata, fromToken, nowInSecond, verbose);
        long count = 0;
        // The order group and the partition iterator pin op-order barriers and sstable
        // resources; the original leaked both. Close them deterministically, innermost last.
        try (ReadOrderGroup orderGroup = command.startOrderGroup();
             UnfilteredPartitionIterator partitionIterator = command.queryStorageInternal(cfs, orderGroup))
        {
            while (partitionIterator.hasNext())
            {
                // Close both per-partition row iterators once compared; the engine-side read is
                // a fresh single-partition command at the same read time.
                try (UnfilteredRowIterator cassandraRowIterator = partitionIterator.next();
                     UnfilteredRowIterator rocksdbRowIterator = cfs.engine.queryStorage(
                         cfs,
                         SinglePartitionReadCommand.fullPartitionRead(cfs.metadata, nowInSecond,
                                                                      cassandraRowIterator.partitionKey())))
                {
                    check.compare(cassandraRowIterator.partitionKey(), cassandraRowIterator, rocksdbRowIterator);
                }

                count++;
                if (limit > 0 && count >= limit)
                    break;
            }
        }
        return check.getReport();
    }
			
Example 3
Project : cassandra Source File : StreamingConsistencyCheckUtils.java View Source Code on GitHub

    
    /**
     * Reads back keys {@code 0 .. expectedNumberOfKeys - 1} at LOCAL_ONE and returns the set of
     * keys for which no live partition (or no live row) was found.
     *
     * Preconditions (asserted): the table has exactly one partition column and it is an
     * {@code Int32Type}, so integer keys can be serialized directly.
     *
     * @param cfs                  the column family store to query
     * @param expectedNumberOfKeys number of consecutive integer keys expected to exist
     * @return the keys that could not be read back
     */
    public static Set<Integer> check(ColumnFamilyStore cfs, int expectedNumberOfKeys)
    {
        assert(cfs.metadata.partitionColumns().size() == 1);
        assert(cfs.metadata.partitionColumns().iterator().next().cellValueType() instanceof Int32Type);

        Set<Integer> missingKeys = new HashSet<>();
        for (int primaryKey = 0 ; primaryKey < expectedNumberOfKeys; primaryKey ++)
        {
            ReadCommand command = new AbstractReadCommandBuilder.SinglePartitionBuilder(
                cfs,
                cfs.decorateKey(Int32Serializer.instance.serialize(primaryKey))).build();
            // PartitionIterator is a CloseableIterator; the original leaked one per key.
            // Closing the partition iterator also releases any row iterator obtained from it.
            try (PartitionIterator partitionIterator = command.execute(ConsistencyLevel.LOCAL_ONE, null))
            {
                if (! partitionIterator.hasNext() || !partitionIterator.next().hasNext())
                    missingKeys.add(primaryKey);
            }
        }
        return missingKeys;
    }