
    INSERT
"""""""""""

-- Sample insert: one day of measurements for station 295740 (fires the ReTIN insert trigger below).
-- Fixed: use the unambiguous ISO-8601 date format (YYYY-MM-DD) instead of '1934-1-1'.
INSERT INTO climate.data(datum, station, data) VALUES ('1934-01-01', '295740', ARRAY[-1250, 400, 99990, 0, 102350, 400, 99990, 0, 260, 400, 1050, 400, 1810, 99990, -889, -2000, 0, 99990, 0]);

o Fires climate_insert_data_trigger (DDL), which
    o Executes climate.data_insert_trigger() (DDL)

        INSERT INTO climate.data_temp VALUES (NEW.*); -- Inserts the value to be inserted into climate.data_temp table

        -- Update insert stats (times are similar to EXPLAIN ANALYZE)     -- // imprecise (à la Schrödinger — measuring affects the timing); to be explained
        

        ~~ NOTIFY/dbms_alert 'retin_climate' once per #batchInsert inserts (optional, not part of the SQL:2003 standard)



    ReTIN functionality - count statistics and decide whether to reindex
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""

o Init (Constructor)
    -- prepare connection, ...
    ~~ LISTEN 'retin_climate';                          -- PostgreSQL variant of the async notification channel
    ~~ EXECUTE dbms_alert.register('retin_climate');    -- Oracle variant (DBMS_ALERT package)

    maxRTimeout := 500;     -- the real-time constraint - no query can last longer (as the time unit we use millisecond by default)
    batchInsert := 1000;    -- the physical table shouldn't hold fewer rows than #batchInsert


o Calls stats_cycle()                                            -- once
    INSERT INTO climate.data_meta(id) VALUES (0);                -- initialize the 0th (running) metadata row
    insert_count_prev := 0;

    WHILE (true) LOOP
        -- get asynchronous notifications 'retin_climate' or wait the user-predefined time
        ~~ EXECUTE dbms_alert.waitany(NULL);

        meta := SELECT * FROM climate.data_meta WHERE id = 0 INTO;

        -- if there really were #batchInsert rows newly inserted
        -- (fixed: the original compared against the undeclared 'batchIndex' instead of 'batchInsert')
        IF (meta.insert_count - insert_count_prev >= batchInsert) THEN

            IF (meta.rt_first IS NULL) THEN                             -- init (fixed: '== NULL' never evaluates to true in SQL; use IS NULL, and THEN was missing)
                meta.rt_first := SELECT min(rt_id) FROM climate.data_temp INTO;     -- update
            END IF;

            meta.rt_last := SELECT max(rt_id) FROM climate.data_temp INTO;  -- update

            -- adjust the sleep time (if there is no dbms_alert/NOTIFY)
            -- (fixed: undeclared 'insert_count' -> meta.insert_count, and '=' -> ':=' for consistency)
            sleep_time := ((meta.rt_last - meta.rt_first)/meta.insert_count) * batchInsert/3; -- wake up about 3 times per #batchInsert (sampling theorem)

            -- count insertion stats
            -- NOTE(review): meta.insert_stdev accumulates the SUM OF SQUARED times despite its name;
            -- the stdev is derived here as sqrt(E[x^2] - E[x]^2)
            insert_mean := meta.insert_millis / meta.insert_count;
            insert_stdev := sqrt( (meta.insert_stdev / meta.insert_count) - (insert_mean * insert_mean) );

            -- issue a SERIOUS WARNING if it is not certain the system can insert data in real-time
            -- (mean + 4*stdev: tail probability beyond 4 sigma under a normal approximation)
            IF (insert_mean + 4*insert_stdev > maxRTimeout) THEN
                RAISE WARNING 'The insert may not suffice the RT condition. It may last up to % milliseconds with probability > 0.006%.', insert_mean + 4*insert_stdev;
            END IF;

            -- measure query duration (user-eligible queries)
            query_index := EXECUTE queryDuration('SELECT * FROM climate.data_index WHERE ...') INTO;
            query_temp := EXECUTE queryDuration('SELECT * FROM climate.data_temp WHERE ...') INTO;
            -- update query statistics
            meta.query_count += 1;                      -- update
            meta.query_index_millis += query_index;
            meta.query_index_stdev += query_index*query_index;  -- sum of squares (see note above)
            meta.query_temp_millis += query_temp;
            meta.query_temp_stdev += query_temp*query_temp;     -- sum of squares (see note above)
            -- and count mean and standard deviations
            index_mean := meta.query_index_millis / meta.query_count;
            index_stdev := sqrt( (meta.query_index_stdev / meta.query_count) - (index_mean * index_mean) );
            temp_mean := meta.query_temp_millis / meta.query_count;
            temp_stdev := sqrt( (meta.query_temp_stdev / meta.query_count) - (temp_mean * temp_mean) );

            -- RT reindexing is needed when:
            IF (index_mean + 4*index_stdev > maxRTimeout OR     -- indexed window too large, cannot be 99.999% sure to be RT - shrink the window    -- has not happened in testing yet
                temp_mean >= index_mean ) THEN                  -- the temporary queries last longer than indexed (to achieve the optimality)       -- this happens all the time

                -- update the final statistics
                meta.query_index_millis := index_mean;
                meta.query_index_stdev := index_stdev;
                meta.query_temp_millis := temp_mean;
                meta.query_temp_stdev := temp_stdev;

                -- perform the reindexing
                EXECUTE reindex();
            END IF;

        END IF;

        -- remember the last observed insert count
        insert_count_prev := meta.insert_count;

        COMMIT;             -- the transaction (there might be more commits proceeded)

        WAIT sleep_time;     -- passively wait for the guessed time if there are no NOTIFY/dbms_alerts

    END LOOP;   -- "never" happens



    When reindexing is triggered
""""""""""""""""""""""""""""""""
    meta := SELECT * FROM climate.data_meta WHERE id=0 INTO;

    -- update the id of the 0th metadata row (turn the running row into a permanent, numbered one)
    meta.id := SELECT max(id)+1 FROM climate.data_meta INTO;

    -- insert a new 0th metadata row (for the purpose of counting statistics)
    INSERT INTO climate.data_meta(id, rt_first) VALUES(0, meta.rt_last);

    -- don't block the metadata table more than necessary, but keep the meta.id's values (the climate.data_insert_trigger() changes just row 0)
    COMMIT;
    -- a rollback in case of an exception goes here; it might be necessary to fix it - update the rt_first on the 0th row and delete the meta.id's row

    -- create a new partition as a part of the data_temp table
    CREATE TABLE climate.data_|| meta.id || AS SELECT * FROM climate.data_temp WHERE rt_id < meta.rt_last;

    -- add constraints (user-defined except the rt_id CHECK)
    ALTER TABLE climate.data_|| meta.id || ADD CONSTRAINT climate_data_|| meta.id ||_check CHECK ( rt_id >= meta.rt_first AND rt_id < meta.rt_last );
    ALTER TABLE climate.data_|| meta.id || ADD CONSTRAINT climate_data_|| meta.id ||_pk PRIMARY KEY (datum, station);
    ALTER TABLE climate.data_|| meta.id || ADD CONSTRAINT climate_data_|| meta.id ||_stations FOREIGN KEY (station) REFERENCES climate.stations (id) MATCH SIMPLE ON UPDATE RESTRICT ON DELETE RESTRICT;

    -- add indexes (user-defined except the rt_id btree)
    -- (fixed: the CREATE INDEX keywords were missing)
    CREATE INDEX climate_data_|| meta.id ||_rt_index ON climate.data_|| meta.id ||(rt_id);
    CREATE INDEX climate_data_|| meta.id ||_gin ON climate.data_|| meta.id || USING gin(data);

    -- find the latest indexed field
    index_first := SELECT index_first FROM climate.data_meta WHERE id=(meta.id - 1) INTO;

    -- indexed window too large, cannot be 99.999% sure to be RT - shrink the window (remove the inheritance from old partitions of data and data_index tables)
    IF (index_mean + 4*index_stdev > maxRTimeout) THEN
        -- (fixed: 'index_fisrt' typos, and the loop must detach partition i, not meta.id)
        FOR i IN index_first .. index_first+1 LOOP
            ALTER TABLE climate.data_|| i || NO INHERIT climate.data;      -- we can drop these physical partitions too (just keep the metadata)
            -- (this is how it is done in Postgres; for Oracle see http://download-west.oracle.com/docs/cd/B28359_01/server.111/b32024/partition.htm )
        END LOOP;

        index_first -= 2; -- we're adding one partition; if a shrink is needed we must drop two old partitions
    END IF;

    -- a temporary index table (because we don't want to lock the index table)
    -- (fixed: PostgreSQL requires parentheses around the LIKE clause)
    CREATE TABLE climate.data_itempex (LIKE climate.data);

    -- the global abstract table climate.data_itempex inherits its partitions
    -- (fixed: 'index_fisrt'/'climata' typos; attach partition i, not meta.id)
    FOR i IN index_first .. meta.id LOOP
        ALTER TABLE climate.data_|| i || INHERIT climate.data_itempex;
    END LOOP;

    -- select the very first inserted rt_id value
    rt_very_first := SELECT index_first FROM climate.data_meta WHERE id=1 INTO;

    -- if we want to add global constraints (user-defined except the rt_id CHECK)
    -- NOTE(review): presumably these belong on the new global table climate.data_itempex;
    -- the original re-used the per-partition table and constraint names from above, which would
    -- collide with the already-created constraints - confirm the intended target
    ALTER TABLE climate.data_itempex ADD CONSTRAINT climate_data_itempex_check CHECK ( rt_id >= rt_very_first AND rt_id < meta.rt_last );
    ALTER TABLE climate.data_itempex ADD CONSTRAINT climate_data_itempex_pk PRIMARY KEY (datum, station);
    ALTER TABLE climate.data_itempex ADD CONSTRAINT climate_data_itempex_stations FOREIGN KEY (station) REFERENCES climate.stations (id) MATCH SIMPLE ON UPDATE RESTRICT ON DELETE RESTRICT;

    -- if we want to add global indexes (user-defined except the rt_id btree)
    CREATE INDEX climate_data_itempex_rt_index ON climate.data_itempex (rt_id);
    CREATE INDEX climate_data_itempex_gin ON climate.data_itempex USING gin(data);

    -- we can commit again
    COMMIT;

    -- delete duplicate values from the temporary table
    DELETE FROM climate.data_temp WHERE rt_id < meta.rt_last;

    -- add the new partition to the virtual data table (if needed)
    ALTER TABLE climate.data_|| meta.id || INHERIT climate.data;

    -- swap index tables - drop the previous index table
    DROP TABLE climate.data_index;
    -- ALTER TABLE climate.data_|| meta.id || NO INHERIT climate.data_index; -- presuming this works as a cascade

    -- finally, replace the index table by the newly created one
    ALTER TABLE  climate.data_itempex RENAME TO data_index;

    COMMIT;   -- finally




    Queries
""""""""""""

    SELECT climate.querycount('SELECT count(*) FROM climate.data WHERE data @> ARRAY[400, 800]');

    - similar to:
    EXPLAIN ANALYZE SELECT count(*) FROM climate.data WHERE data @> ARRAY[400, 800];

    - execute the query FROM tables:
    o data_temp         -- selects only unindexed temporary data (Seq scan)
    o data_index        -- selects only indexed data (CHECK table constraints, Index Scan, Filter)
    o data              -- selects both (altogether)












/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    The rest is kept here only so that it does not get lost (original: "Zbytek je jen tak, aby se to nikam neztratilo")
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

SELECT climate.querycount('SELECT count(*) FROM climate.data WHERE data @> ARRAY[400, 800]');

EXPLAIN ANALYZE SELECT count(*) FROM climate.data WHERE data @> ARRAY[400, 800];


"Aggregate  (cost=204.93..204.94 rows=1 width=0) (actual time=14.393..14.393 rows=1 loops=1)"
"  ->  Append  (cost=0.00..204.89 rows=16 width=0) (actual time=0.024..12.427 rows=5061 loops=1)"
"        ->  Seq Scan on data  (cost=0.00..12.57 rows=1 width=0) (actual time=0.022..0.495 rows=194 loops=1)"
"              Filter: (data @> '{400}'::integer[])"
"        ->  Seq Scan on data_temp data  (cost=0.00..98.79 rows=3 width=0) (actual time=0.010..4.649 rows=1877 loops=1)"
"              Filter: (data @> '{400}'::integer[])"
"        ->  Seq Scan on data_1 data  (cost=0.00..6.61 rows=1 width=0) (actual time=0.016..0.398 rows=158 loops=1)"
"              Filter: (data @> '{400}'::integer[])"
"        ->  Bitmap Heap Scan on data_2 data  (cost=4.26..8.27 rows=1 width=0) (actual time=0.164..0.332 rows=311 loops=1)"
"              Recheck Cond: (data @> '{400}'::integer[])"
"              ->  Bitmap Index Scan on climate_data_2_gin  (cost=0.00..4.26 rows=1 width=0) (actual time=0.148..0.148 rows=311 loops=1)"
"                    Index Cond: (data @> '{400}'::integer[])"
"        ->  Bitmap Heap Scan on data_3 data  (cost=4.26..8.27 rows=1 width=0) (actual time=0.178..0.384 rows=375 loops=1)"
"              Recheck Cond: (data @> '{400}'::integer[])"
"              ->  Bitmap Index Scan on climate_data_3_gin  (cost=0.00..4.26 rows=1 width=0) (actual time=0.163..0.163 rows=375 loops=1)"
"                    Index Cond: (data @> '{400}'::integer[])"
"        ->  Seq Scan on data_4 data  (cost=0.00..8.34 rows=1 width=0) (actual time=0.013..0.507 rows=267 loops=1)"
"              Filter: (data @> '{400}'::integer[])"
"        ->  Seq Scan on data_12 data  (cost=0.00..7.81 rows=1 width=0) (actual time=0.010..0.424 rows=223 loops=1)"
"              Filter: (data @> '{400}'::integer[])"
"        ->  Seq Scan on data_5 data  (cost=0.00..6.54 rows=1 width=0) (actual time=0.011..0.393 rows=203 loops=1)"
"              Filter: (data @> '{400}'::integer[])"
"        ->  Seq Scan on data_6 data  (cost=0.00..8.28 rows=1 width=0) (actual time=0.010..0.500 rows=262 loops=1)"
"              Filter: (data @> '{400}'::integer[])"
"        ->  Bitmap Heap Scan on data_7 data  (cost=4.26..8.27 rows=1 width=0) (actual time=0.168..0.397 rows=356 loops=1)"
"              Recheck Cond: (data @> '{400}'::integer[])"
"              ->  Bitmap Index Scan on climate_data_7_gin  (cost=0.00..4.26 rows=1 width=0) (actual time=0.153..0.153 rows=356 loops=1)"
"                    Index Cond: (data @> '{400}'::integer[])"
"        ->  Seq Scan on data_8 data  (cost=0.00..6.65 rows=1 width=0) (actual time=0.012..0.400 rows=145 loops=1)"
"              Filter: (data @> '{400}'::integer[])"
"        ->  Bitmap Heap Scan on data_9 data  (cost=4.26..8.27 rows=1 width=0) (actual time=0.127..0.276 rows=252 loops=1)"
"              Recheck Cond: (data @> '{400}'::integer[])"
"              ->  Bitmap Index Scan on climate_data_9_gin  (cost=0.00..4.26 rows=1 width=0) (actual time=0.112..0.112 rows=252 loops=1)"
"                    Index Cond: (data @> '{400}'::integer[])"
"        ->  Seq Scan on data_10 data  (cost=0.00..7.95 rows=1 width=0) (actual time=0.012..0.438 rows=168 loops=1)"
"              Filter: (data @> '{400}'::integer[])"
"        ->  Bitmap Heap Scan on data_11 data  (cost=4.26..8.27 rows=1 width=0) (actual time=0.132..0.288 rows=270 loops=1)"
"              Recheck Cond: (data @> '{400}'::integer[])"
"              ->  Bitmap Index Scan on climate_data_11_gin  (cost=0.00..4.26 rows=1 width=0) (actual time=0.117..0.117 rows=270 loops=1)"
"                    Index Cond: (data @> '{400}'::integer[])"
"Total runtime: 14.608 ms"




    Oracle functionality: NOTIFY
"""""""""""""""""""""""""""""""""

-- ReTIN listener side: register for the alert and block until it is signalled
EXEC dbms_alert.register('retin_climate');
WHILE TRUE
	EXEC dbms_alert.waitany(NULL);

-- ReTIN data_insert_trigger side: signal listeners once a batch has been inserted
EXEC dbms_alert.signal('retin_climate','block_inserted');

