// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018-2020 Intel Corporation


#ifndef OPENCV_GAPI_CORE_HPP
#define OPENCV_GAPI_CORE_HPP

#include <math.h>

#include <utility> // std::tuple

#include <opencv2/imgproc.hpp>

#include <opencv2/gapi/gmat.hpp>
#include <opencv2/gapi/gscalar.hpp>
#include <opencv2/gapi/gkernel.hpp>
#include <opencv2/gapi/streaming/format.hpp>

/** \defgroup gapi_core G-API Core functionality
@{
    @defgroup gapi_math Graph API: Math operations
    @defgroup gapi_pixelwise Graph API: Pixelwise operations
    @defgroup gapi_matrixop Graph API: Operations on matrices
    @defgroup gapi_transform Graph API: Image and channel composition functions
@}
 */

namespace cv { namespace gapi {
namespace core {
    using GMat2 = std::tuple<GMat,GMat>;
    using GMat3 = std::tuple<GMat,GMat,GMat>; // FIXME: how to avoid this?
    using GMat4 = std::tuple<GMat,GMat,GMat,GMat>;
    using GMatScalar  = std::tuple<GMat, GScalar>;

    G_TYPED_KERNEL(GAdd, <GMat(GMat, GMat, int)>, "org.opencv.core.math.add") {
        static GMatDesc outMeta(GMatDesc a, GMatDesc b, int ddepth) {
            if (ddepth == -1)
            {
                // OpenCV: When the input arrays in add/subtract/multiply/divide
                // functions have different depths, the output array depth must be
                // explicitly specified!
                // See arithm_op() @ arithm.cpp
                GAPI_Assert(a.chan == b.chan);
                GAPI_Assert(a.depth == b.depth);
                return a;
            }
            return a.withDepth(ddepth);
        }
    };

    G_TYPED_KERNEL(GAddC, <GMat(GMat, GScalar, int)>, "org.opencv.core.math.addC") {
        static GMatDesc outMeta(GMatDesc a, GScalarDesc, int ddepth) {
            return a.withDepth(ddepth);
        }
    };

    G_TYPED_KERNEL(GSub, <GMat(GMat, GMat, int)>, "org.opencv.core.math.sub") {
        static GMatDesc outMeta(GMatDesc a, GMatDesc b, int ddepth) {
            if (ddepth == -1)
            {
                // Select the larger data depth of a and b
                // (the number of channels must be the same)
                // FIXME!!! Clarify if it is valid for sub()
                GAPI_Assert(a.chan == b.chan);
                ddepth = std::max(a.depth, b.depth);
            }
            return a.withDepth(ddepth);
        }
    };

    G_TYPED_KERNEL(GSubC, <GMat(GMat, GScalar, int)>, "org.opencv.core.math.subC") {
        static GMatDesc outMeta(GMatDesc a, GScalarDesc, int ddepth) {
            return a.withDepth(ddepth);
        }
    };

    G_TYPED_KERNEL(GSubRC,<GMat(GScalar, GMat, int)>, "org.opencv.core.math.subRC") {
        static GMatDesc outMeta(GScalarDesc, GMatDesc b, int ddepth) {
            return b.withDepth(ddepth);
        }
    };

    G_TYPED_KERNEL(GMul, <GMat(GMat, GMat, double, int)>, "org.opencv.core.math.mul") {
        static GMatDesc outMeta(GMatDesc a, GMatDesc, double, int ddepth) {
            return a.withDepth(ddepth);
        }
    };

    G_TYPED_KERNEL(GMulCOld, <GMat(GMat, double, int)>, "org.opencv.core.math.mulCOld") {
        static GMatDesc outMeta(GMatDesc a, double, int ddepth) {
            return a.withDepth(ddepth);
        }
    };

    G_TYPED_KERNEL(GMulC, <GMat(GMat, GScalar, int)>, "org.opencv.core.math.mulC"){
        static GMatDesc outMeta(GMatDesc a, GScalarDesc, int ddepth) {
            return a.withDepth(ddepth);
        }
    };

    G_TYPED_KERNEL(GMulS, <GMat(GMat, GScalar)>, "org.opencv.core.math.muls") {
        static GMatDesc outMeta(GMatDesc a, GScalarDesc) {
            return a;
        }
    }; // FIXME: Merge with MulC

    G_TYPED_KERNEL(GDiv, <GMat(GMat, GMat, double, int)>, "org.opencv.core.math.div") {
        static GMatDesc outMeta(GMatDesc a, GMatDesc b, double, int ddepth) {
            if (ddepth == -1)
            {
                GAPI_Assert(a.depth == b.depth);
                return b;
            }
            return a.withDepth(ddepth);
        }
    };

    G_TYPED_KERNEL(GDivC, <GMat(GMat, GScalar, double, int)>, "org.opencv.core.math.divC") {
        static GMatDesc outMeta(GMatDesc a, GScalarDesc, double, int ddepth) {
            return a.withDepth(ddepth);
        }
    };

    G_TYPED_KERNEL(GDivRC, <GMat(GScalar, GMat, double, int)>, "org.opencv.core.math.divRC") {
        static GMatDesc outMeta(GScalarDesc, GMatDesc b, double, int ddepth) {
            return b.withDepth(ddepth);
        }
    };

    G_TYPED_KERNEL(GMean, <GScalar(GMat)>, "org.opencv.core.math.mean") {
        static GScalarDesc outMeta(GMatDesc) {
            return empty_scalar_desc();
        }
    };

    G_TYPED_KERNEL_M(GPolarToCart, <GMat2(GMat, GMat, bool)>, "org.opencv.core.math.polarToCart") {
        static std::tuple<GMatDesc, GMatDesc> outMeta(GMatDesc, GMatDesc a, bool) {
            return std::make_tuple(a, a);
        }
    };

    G_TYPED_KERNEL_M(GCartToPolar, <GMat2(GMat, GMat, bool)>, "org.opencv.core.math.cartToPolar") {
        static std::tuple<GMatDesc, GMatDesc> outMeta(GMatDesc x, GMatDesc, bool) {
            return std::make_tuple(x, x);
        }
    };

    G_TYPED_KERNEL(GPhase, <GMat(GMat, GMat, bool)>, "org.opencv.core.math.phase") {
        static GMatDesc outMeta(const GMatDesc &inx, const GMatDesc &, bool) {
            return inx;
        }
    };

    G_TYPED_KERNEL(GMask, <GMat(GMat,GMat)>, "org.opencv.core.pixelwise.mask") {
        static GMatDesc outMeta(GMatDesc in, GMatDesc) {
            return in;
        }
    };

    G_TYPED_KERNEL(GCmpGT, <GMat(GMat, GMat)>, "org.opencv.core.pixelwise.compare.cmpGT") {
        static GMatDesc outMeta(GMatDesc a, GMatDesc) {
            return a.withDepth(CV_8U);
        }
    };

    G_TYPED_KERNEL(GCmpGE, <GMat(GMat, GMat)>, "org.opencv.core.pixelwise.compare.cmpGE") {
        static GMatDesc outMeta(GMatDesc a, GMatDesc) {
            return a.withDepth(CV_8U);
        }
    };

    G_TYPED_KERNEL(GCmpLE, <GMat(GMat, GMat)>, "org.opencv.core.pixelwise.compare.cmpLE") {
        static GMatDesc outMeta(GMatDesc a, GMatDesc) {
            return a.withDepth(CV_8U);
        }
    };

    G_TYPED_KERNEL(GCmpLT, <GMat(GMat, GMat)>, "org.opencv.core.pixelwise.compare.cmpLT") {
        static GMatDesc outMeta(GMatDesc a, GMatDesc) {
            return a.withDepth(CV_8U);
        }
    };

    G_TYPED_KERNEL(GCmpEQ, <GMat(GMat, GMat)>, "org.opencv.core.pixelwise.compare.cmpEQ") {
        static GMatDesc outMeta(GMatDesc a, GMatDesc) {
            return a.withDepth(CV_8U);
        }
    };

    G_TYPED_KERNEL(GCmpNE, <GMat(GMat, GMat)>, "org.opencv.core.pixelwise.compare.cmpNE") {
        static GMatDesc outMeta(GMatDesc a, GMatDesc) {
            return a.withDepth(CV_8U);
        }
    };

    G_TYPED_KERNEL(GCmpGTScalar, <GMat(GMat, GScalar)>, "org.opencv.core.pixelwise.compare.cmpGTScalar"){
        static GMatDesc outMeta(GMatDesc a, GScalarDesc) {
            return a.withDepth(CV_8U);
        }
    };

    G_TYPED_KERNEL(GCmpGEScalar, <GMat(GMat, GScalar)>, "org.opencv.core.pixelwise.compare.cmpGEScalar"){
        static GMatDesc outMeta(GMatDesc a, GScalarDesc) {
            return a.withDepth(CV_8U);
        }
    };

    G_TYPED_KERNEL(GCmpLEScalar, <GMat(GMat, GScalar)>, "org.opencv.core.pixelwise.compare.cmpLEScalar"){
        static GMatDesc outMeta(GMatDesc a, GScalarDesc) {
            return a.withDepth(CV_8U);
        }
    };

    G_TYPED_KERNEL(GCmpLTScalar, <GMat(GMat, GScalar)>, "org.opencv.core.pixelwise.compare.cmpLTScalar"){
        static GMatDesc outMeta(GMatDesc a, GScalarDesc) {
            return a.withDepth(CV_8U);
        }
    };

    G_TYPED_KERNEL(GCmpEQScalar, <GMat(GMat, GScalar)>, "org.opencv.core.pixelwise.compare.cmpEQScalar"){
        static GMatDesc outMeta(GMatDesc a, GScalarDesc) {
            return a.withDepth(CV_8U);
        }
    };

    G_TYPED_KERNEL(GCmpNEScalar, <GMat(GMat, GScalar)>, "org.opencv.core.pixelwise.compare.cmpNEScalar"){
        static GMatDesc outMeta(GMatDesc a, GScalarDesc) {
            return a.withDepth(CV_8U);
        }
    };

    G_TYPED_KERNEL(GAnd, <GMat(GMat, GMat)>, "org.opencv.core.pixelwise.bitwise_and") {
        static GMatDesc outMeta(GMatDesc a, GMatDesc) {
            return a;
        }
    };

    G_TYPED_KERNEL(GAndS, <GMat(GMat, GScalar)>, "org.opencv.core.pixelwise.bitwise_andS") {
        static GMatDesc outMeta(GMatDesc a, GScalarDesc) {
            return a;
        }
    };

    G_TYPED_KERNEL(GOr, <GMat(GMat, GMat)>, "org.opencv.core.pixelwise.bitwise_or") {
        static GMatDesc outMeta(GMatDesc a, GMatDesc) {
            return a;
        }
    };

    G_TYPED_KERNEL(GOrS, <GMat(GMat, GScalar)>, "org.opencv.core.pixelwise.bitwise_orS") {
        static GMatDesc outMeta(GMatDesc a, GScalarDesc) {
            return a;
        }
    };

    G_TYPED_KERNEL(GXor, <GMat(GMat, GMat)>, "org.opencv.core.pixelwise.bitwise_xor") {
        static GMatDesc outMeta(GMatDesc a, GMatDesc) {
            return a;
        }
    };

    G_TYPED_KERNEL(GXorS, <GMat(GMat, GScalar)>, "org.opencv.core.pixelwise.bitwise_xorS") {
        static GMatDesc outMeta(GMatDesc a, GScalarDesc) {
            return a;
        }
    };

    G_TYPED_KERNEL(GNot, <GMat(GMat)>, "org.opencv.core.pixelwise.bitwise_not") {
        static GMatDesc outMeta(GMatDesc a) {
            return a;
        }
    };

    G_TYPED_KERNEL(GSelect, <GMat(GMat, GMat, GMat)>, "org.opencv.core.pixelwise.select") {
        static GMatDesc outMeta(GMatDesc a, GMatDesc, GMatDesc) {
            return a;
        }
    };

    G_TYPED_KERNEL(GMin, <GMat(GMat, GMat)>, "org.opencv.core.matrixop.min") {
        static GMatDesc outMeta(GMatDesc a, GMatDesc) {
            return a;
        }
    };

    G_TYPED_KERNEL(GMax, <GMat(GMat, GMat)>, "org.opencv.core.matrixop.max") {
        static GMatDesc outMeta(GMatDesc a, GMatDesc) {
            return a;
        }
    };

    G_TYPED_KERNEL(GAbsDiff, <GMat(GMat, GMat)>, "org.opencv.core.matrixop.absdiff") {
        static GMatDesc outMeta(GMatDesc a, GMatDesc) {
            return a;
        }
    };

    G_TYPED_KERNEL(GAbsDiffC, <GMat(GMat,GScalar)>, "org.opencv.core.matrixop.absdiffC") {
        static GMatDesc outMeta(const GMatDesc& a, const GScalarDesc&) {
            return a;
        }
    };

    G_TYPED_KERNEL(GSum, <GScalar(GMat)>, "org.opencv.core.matrixop.sum") {
        static GScalarDesc outMeta(GMatDesc) {
            return empty_scalar_desc();
        }
    };

    G_TYPED_KERNEL(GCountNonZero, <GOpaque<int>(GMat)>, "org.opencv.core.matrixop.countNonZero") {
        static GOpaqueDesc outMeta(GMatDesc in) {
            GAPI_Assert(in.chan == 1);
            return empty_gopaque_desc();
        }
    };

    G_TYPED_KERNEL(GAddW, <GMat(GMat, double, GMat, double, double, int)>, "org.opencv.core.matrixop.addweighted") {
        static GMatDesc outMeta(GMatDesc a, double, GMatDesc b, double, double, int ddepth) {
            if (ddepth == -1)
            {
                // OpenCV: When the input arrays in add/subtract/multiply/divide
                // functions have different depths, the output array depth must be
                // explicitly specified!
                // See arithm_op() @ arithm.cpp
                GAPI_Assert(a.chan == b.chan);
                GAPI_Assert(a.depth == b.depth);
                return a;
            }
            return a.withDepth(ddepth);
        }
    };

    G_TYPED_KERNEL(GNormL1, <GScalar(GMat)>, "org.opencv.core.matrixop.norml1") {
        static GScalarDesc outMeta(GMatDesc) {
            return empty_scalar_desc();
        }
    };

    G_TYPED_KERNEL(GNormL2, <GScalar(GMat)>, "org.opencv.core.matrixop.norml2") {
        static GScalarDesc outMeta(GMatDesc) {
            return empty_scalar_desc();
        }
    };

    G_TYPED_KERNEL(GNormInf, <GScalar(GMat)>, "org.opencv.core.matrixop.norminf") {
        static GScalarDesc outMeta(GMatDesc) {
            return empty_scalar_desc();
        }
    };

    G_TYPED_KERNEL_M(GIntegral, <GMat2(GMat, int, int)>, "org.opencv.core.matrixop.integral") {
        static std::tuple<GMatDesc, GMatDesc> outMeta(GMatDesc in, int sd, int sqd) {
            return std::make_tuple(in.withSizeDelta(1,1).withDepth(sd),
                                   in.withSizeDelta(1,1).withDepth(sqd));
        }
    };

    G_TYPED_KERNEL(GThreshold, <GMat(GMat, GScalar, GScalar, int)>, "org.opencv.core.matrixop.threshold") {
        static GMatDesc outMeta(GMatDesc in, GScalarDesc, GScalarDesc, int) {
            return in;
        }
    };


    G_TYPED_KERNEL_M(GThresholdOT, <GMatScalar(GMat, GScalar, int)>, "org.opencv.core.matrixop.thresholdOT") {
        static std::tuple<GMatDesc,GScalarDesc> outMeta(GMatDesc in, GScalarDesc, int) {
            return std::make_tuple(in, empty_scalar_desc());
        }
    };

    G_TYPED_KERNEL(GInRange, <GMat(GMat, GScalar, GScalar)>, "org.opencv.core.matrixop.inrange") {
        static GMatDesc outMeta(GMatDesc in, GScalarDesc, GScalarDesc) {
            return in.withType(CV_8U, 1);
        }
    };

    G_TYPED_KERNEL_M(GSplit3, <GMat3(GMat)>, "org.opencv.core.transform.split3") {
        static std::tuple<GMatDesc, GMatDesc, GMatDesc> outMeta(GMatDesc in) {
            const auto out_depth = in.depth;
            const auto out_desc  = in.withType(out_depth, 1);
            return std::make_tuple(out_desc, out_desc, out_desc);
        }
    };

    G_TYPED_KERNEL_M(GSplit4, <GMat4(GMat)>,"org.opencv.core.transform.split4") {
        static std::tuple<GMatDesc, GMatDesc, GMatDesc, GMatDesc> outMeta(GMatDesc in) {
            const auto out_depth = in.depth;
            const auto out_desc = in.withType(out_depth, 1);
            return std::make_tuple(out_desc, out_desc, out_desc, out_desc);
        }
    };

    G_TYPED_KERNEL(GResize, <GMat(GMat,Size,double,double,int)>, "org.opencv.core.transform.resize") {
        static GMatDesc outMeta(GMatDesc in, Size sz, double fx, double fy, int) {
            if (sz.width != 0 && sz.height != 0)
            {
                return in.withSize(sz);
            }
            else
            {
                int outSz_w = static_cast<int>(round(in.size.width  * fx));
                int outSz_h = static_cast<int>(round(in.size.height * fy));
                GAPI_Assert(outSz_w > 0 && outSz_h > 0);
                return in.withSize(Size(outSz_w, outSz_h));
            }
        }
    };

    G_TYPED_KERNEL(GResizeP, <GMatP(GMatP,Size,int)>, "org.opencv.core.transform.resizeP") {
        static GMatDesc outMeta(GMatDesc in, Size sz, int interp) {
            GAPI_Assert(in.depth == CV_8U);
            GAPI_Assert(in.chan == 3);
            GAPI_Assert(in.planar);
            GAPI_Assert(interp == cv::INTER_LINEAR);
            return in.withSize(sz);
        }
    };

    G_TYPED_KERNEL(GMerge3, <GMat(GMat,GMat,GMat)>, "org.opencv.core.transform.merge3") {
        static GMatDesc outMeta(GMatDesc in, GMatDesc, GMatDesc) {
            // Preserve depth and add channel component
            return in.withType(in.depth, 3);
        }
    };

    G_TYPED_KERNEL(GMerge4, <GMat(GMat,GMat,GMat,GMat)>, "org.opencv.core.transform.merge4") {
        static GMatDesc outMeta(GMatDesc in, GMatDesc, GMatDesc, GMatDesc) {
            // Preserve depth and add channel component
            return in.withType(in.depth, 4);
        }
    };

    G_TYPED_KERNEL(GRemap, <GMat(GMat, Mat, Mat, int, int, Scalar)>, "org.opencv.core.transform.remap") {
        static GMatDesc outMeta(GMatDesc in, Mat m1, Mat, int, int, Scalar) {
            return in.withSize(m1.size());
        }
    };

    G_TYPED_KERNEL(GFlip, <GMat(GMat, int)>, "org.opencv.core.transform.flip") {
        static GMatDesc outMeta(GMatDesc in, int) {
            return in;
        }
    };

    // TODO: eliminate the need in this kernel (streaming)
    G_TYPED_KERNEL(GCrop, <GMat(GMat, Rect)>, "org.opencv.core.transform.crop") {
        static GMatDesc outMeta(GMatDesc in, Rect rc) {
            return in.withSize(Size(rc.width, rc.height));
        }
    };

    G_TYPED_KERNEL(GConcatHor, <GMat(GMat, GMat)>, "org.opencv.imgproc.transform.concatHor") {
        static GMatDesc outMeta(GMatDesc l, GMatDesc r) {
            return l.withSizeDelta(+r.size.width, 0);
        }
    };

    G_TYPED_KERNEL(GConcatVert, <GMat(GMat, GMat)>, "org.opencv.imgproc.transform.concatVert") {
        static GMatDesc outMeta(GMatDesc t, GMatDesc b) {
            return t.withSizeDelta(0, +b.size.height);
        }
    };

    G_TYPED_KERNEL(GLUT, <GMat(GMat, Mat)>, "org.opencv.core.transform.LUT") {
        static GMatDesc outMeta(GMatDesc in, Mat) {
            return in;
        }
    };

    G_TYPED_KERNEL(GConvertTo, <GMat(GMat, int, double, double)>, "org.opencv.core.transform.convertTo") {
        static GMatDesc outMeta(GMatDesc in, int rdepth, double, double) {
            return rdepth < 0 ? in : in.withDepth(rdepth);
        }
    };

    G_TYPED_KERNEL(GSqrt, <GMat(GMat)>, "org.opencv.core.math.sqrt") {
        static GMatDesc outMeta(GMatDesc in) {
            return in;
        }
    };

    G_TYPED_KERNEL(GNormalize, <GMat(GMat, double, double, int, int)>, "org.opencv.core.normalize") {
        static GMatDesc outMeta(GMatDesc in, double, double, int, int ddepth) {
            // unlike OpenCV's normalize, this kernel doesn't take a mask as a parameter
            return (ddepth < 0 ? in : in.withDepth(ddepth));
        }
    };

    G_TYPED_KERNEL(GWarpPerspective, <GMat(GMat, const Mat&, Size, int, int, const cv::Scalar&)>, "org.opencv.core.warpPerspective") {
        static GMatDesc outMeta(GMatDesc in, const Mat&, Size dsize, int, int borderMode, const cv::Scalar&) {
            GAPI_Assert((borderMode == cv::BORDER_CONSTANT || borderMode == cv::BORDER_REPLICATE) &&
                        "cv::gapi::warpPerspective supports only cv::BORDER_CONSTANT and cv::BORDER_REPLICATE border modes");
            return in.withType(in.depth, in.chan).withSize(dsize);
        }
    };

    G_TYPED_KERNEL(GWarpAffine, <GMat(GMat, const Mat&, Size, int, int, const cv::Scalar&)>, "org.opencv.core.warpAffine") {
        static GMatDesc outMeta(GMatDesc in, const Mat&, Size dsize, int, int border_mode, const cv::Scalar&) {
            GAPI_Assert(border_mode != cv::BORDER_TRANSPARENT &&
                        "cv::BORDER_TRANSPARENT mode is not supported in cv::gapi::warpAffine");
            return in.withType(in.depth, in.chan).withSize(dsize);
        }
    };

    G_TYPED_KERNEL(
        GKMeansND,
        <std::tuple<GOpaque<double>,GMat,GMat>(GMat,int,GMat,TermCriteria,int,KmeansFlags)>,
        "org.opencv.core.kmeansND") {

        static std::tuple<GOpaqueDesc,GMatDesc,GMatDesc>
        outMeta(const GMatDesc& in, int K, const GMatDesc& bestLabels, const TermCriteria&, int,
                KmeansFlags flags) {
            GAPI_Assert(in.depth == CV_32F);
            std::vector<int> amount_n_dim = detail::checkVector(in);
            int amount = amount_n_dim[0], dim = amount_n_dim[1];
            if (amount == -1)   // Mat with height != 1, width != 1, channels != 1 given
            {                   // which means that kmeans will consider the following:
                amount = in.size.height;
                dim    = in.size.width * in.chan;
            }
            // kmeans sets these labels' sizes when no bestLabels given:
            GMatDesc out_labels(CV_32S, 1, Size{1, amount});
            // kmeans always sets these centers' sizes:
            GMatDesc centers   (CV_32F, 1, Size{dim, K});
            if (flags & KMEANS_USE_INITIAL_LABELS)
            {
                GAPI_Assert(bestLabels.depth == CV_32S);
                int labels_amount = detail::checkVector(bestLabels, 1u);
                GAPI_Assert(labels_amount == amount);
                out_labels = bestLabels;  // kmeans preserves bestLabels' sizes if given
            }
            return std::make_tuple(empty_gopaque_desc(), out_labels, centers);
        }
    };

    G_TYPED_KERNEL(
        GKMeansNDNoInit,
        <std::tuple<GOpaque<double>,GMat,GMat>(GMat,int,TermCriteria,int,KmeansFlags)>,
        "org.opencv.core.kmeansNDNoInit") {

        static std::tuple<GOpaqueDesc,GMatDesc,GMatDesc>
        outMeta(const GMatDesc& in, int K, const TermCriteria&, int, KmeansFlags flags) {
            GAPI_Assert( !(flags & KMEANS_USE_INITIAL_LABELS) );
            GAPI_Assert(in.depth == CV_32F);
            std::vector<int> amount_n_dim = detail::checkVector(in);
            int amount = amount_n_dim[0], dim = amount_n_dim[1];
            if (amount == -1) // Mat with height != 1, width != 1, channels != 1 given
            {                   // which means that kmeans will consider the following:
                amount = in.size.height;
                dim    = in.size.width * in.chan;
            }
            GMatDesc out_labels(CV_32S, 1, Size{1, amount});
            GMatDesc centers   (CV_32F, 1, Size{dim, K});
            return std::make_tuple(empty_gopaque_desc(), out_labels, centers);
        }
    };

    G_TYPED_KERNEL(GKMeans2D, <std::tuple<GOpaque<double>,GArray<int>,GArray<Point2f>>
                               (GArray<Point2f>,int,GArray<int>,TermCriteria,int,KmeansFlags)>,
                   "org.opencv.core.kmeans2D") {
        static std::tuple<GOpaqueDesc,GArrayDesc,GArrayDesc>
        outMeta(const GArrayDesc&,int,const GArrayDesc&,const TermCriteria&,int,KmeansFlags) {
            return std::make_tuple(empty_gopaque_desc(), empty_array_desc(), empty_array_desc());
        }
    };

    G_TYPED_KERNEL(GKMeans3D, <std::tuple<GOpaque<double>,GArray<int>,GArray<Point3f>>
                               (GArray<Point3f>,int,GArray<int>,TermCriteria,int,KmeansFlags)>,
                   "org.opencv.core.kmeans3D") {
        static std::tuple<GOpaqueDesc,GArrayDesc,GArrayDesc>
        outMeta(const GArrayDesc&,int,const GArrayDesc&,const TermCriteria&,int,KmeansFlags) {
            return std::make_tuple(empty_gopaque_desc(), empty_array_desc(), empty_array_desc());
        }
    };
} // namespace core

namespace streaming {

// Operations for Streaming (declared in this header for convenience)
G_TYPED_KERNEL(GSize, <GOpaque<Size>(GMat)>, "org.opencv.streaming.size") {
    static GOpaqueDesc outMeta(const GMatDesc&) {
        return empty_gopaque_desc();
    }
};

G_TYPED_KERNEL(GSizeR, <GOpaque<Size>(GOpaque<Rect>)>, "org.opencv.streaming.sizeR") {
    static GOpaqueDesc outMeta(const GOpaqueDesc&) {
        return empty_gopaque_desc();
    }
};

G_TYPED_KERNEL(GSizeMF, <GOpaque<Size>(GFrame)>, "org.opencv.streaming.sizeMF") {
    static GOpaqueDesc outMeta(const GFrameDesc&) {
        return empty_gopaque_desc();
    }
};
} // namespace streaming

//! @addtogroup gapi_math
//! @{

/** @brief Calculates the per-element sum of two matrices.

The function add calculates the sum of two matrices of the same size and the same number of channels:
\f[\texttt{dst}(I) =  \texttt{saturate} ( \texttt{src1}(I) +  \texttt{src2}(I))\f]

The function can be replaced with matrix expressions:
    \f[\texttt{dst} =  \texttt{src1} + \texttt{src2}\f]

The input matrices and the output matrix can all have the same or different depths. For example, you
can add a 16-bit unsigned matrix to an 8-bit signed matrix and store the sum as a 32-bit
floating-point matrix. Depth of the output matrix is determined by the ddepth parameter.
If src1.depth() == src2.depth(), ddepth can be set to the default -1. In this case, the output matrix will have
the same depth as the input matrices.

Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.math.add"
@param src1 first input matrix.
@param src2 second input matrix.
@param ddepth optional depth of the output matrix.
@sa sub, addWeighted
*/
GAPI_EXPORTS_W GMat add(const GMat& src1, const GMat& src2, int ddepth = -1);
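// A minimal usage sketch (illustration only, not part of the original header),
// assuming the usual G-API workflow: declare GMat placeholders, build the
// expression, wrap it into a cv::GComputation and run it on cv::Mat data.
//
//     cv::GMat in1, in2;
//     cv::GMat out = cv::gapi::add(in1, in2);             // ddepth = -1: keep input depth
//     cv::GComputation graph(cv::GIn(in1, in2), cv::GOut(out));
//
//     cv::Mat a = cv::Mat::ones(4, 4, CV_8UC1);
//     cv::Mat b = cv::Mat::ones(4, 4, CV_8UC1);
//     cv::Mat sum;
//     graph.apply(cv::gin(a, b), cv::gout(sum));          // every element of sum is 2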

/** @brief Calculates the per-element sum of matrix and given scalar.

The function addC adds a given scalar value to each element of the given matrix.
The function can be replaced with matrix expressions:

    \f[\texttt{dst} =  \texttt{src1} + \texttt{c}\f]

Depth of the output matrix is determined by the ddepth parameter.
If ddepth is set to default -1, the depth of output matrix will be the same as the depth of input matrix.
The matrices can be single or multi channel. Output matrix must have the same size and number of channels as the input matrix.

Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.math.addC"
@param src1 first input matrix.
@param c scalar value to be added.
@param ddepth optional depth of the output matrix.
@sa sub, addWeighted
*/
GAPI_EXPORTS_W GMat addC(const GMat& src1, const GScalar& c, int ddepth = -1);
//! @overload
GAPI_EXPORTS GMat addC(const GScalar& c, const GMat& src1, int ddepth = -1);

/** @brief Calculates the per-element difference between two matrices.

The function sub calculates the difference between two matrices of the same size and the same number of
channels:
    \f[\texttt{dst}(I) =   \texttt{src1}(I) -  \texttt{src2}(I)\f]

The function can be replaced with matrix expressions:
\f[\texttt{dst} =   \texttt{src1} -  \texttt{src2}\f]

The input matrices and the output matrix can all have the same or different depths. For example, you
can subtract two 8-bit unsigned matrices and store the difference as a 16-bit signed matrix.
Depth of the output matrix is determined by the ddepth parameter.
If src1.depth() == src2.depth(), ddepth can be set to the default -1. In this case, the output matrix will have
the same depth as the input matrices. The matrices can be single or multi channel.

Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.math.sub"
@param src1 first input matrix.
@param src2 second input matrix.
@param ddepth optional depth of the output matrix.
@sa  add, addC
  */
GAPI_EXPORTS GMat sub(const GMat& src1, const GMat& src2, int ddepth = -1);
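// Illustrative sketch (not part of the original header): passing an explicit
// ddepth widens the result so that negative differences of CV_8U inputs are
// not saturated to zero.
//
//     cv::GMat a, b;
//     cv::GMat diff = cv::gapi::sub(a, b, CV_16S);        // 8U - 8U stored as 16S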

/** @brief Calculates the per-element difference between matrix and given scalar.

The function can be replaced with matrix expressions:
    \f[\texttt{dst} =  \texttt{src} - \texttt{c}\f]

Depth of the output matrix is determined by the ddepth parameter.
If ddepth is set to default -1, the depth of output matrix will be the same as the depth of input matrix.
The matrices can be single or multi channel. Output matrix must have the same size as src.

Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.math.subC"
@param src first input matrix.
@param c scalar value to be subtracted.
@param ddepth optional depth of the output matrix.
@sa  add, addC, subRC
  */
GAPI_EXPORTS GMat subC(const GMat& src, const GScalar& c, int ddepth = -1);

/** @brief Calculates the per-element difference between given scalar and the matrix.

The function can be replaced with matrix expressions:
    \f[\texttt{dst} =  \texttt{val} - \texttt{src}\f]

Depth of the output matrix is determined by the ddepth parameter.
If ddepth is set to default -1, the depth of output matrix will be the same as the depth of input matrix.
The matrices can be single or multi channel. Output matrix must have the same size as src.

Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.math.subRC"
@param c scalar value to subtract from.
@param src input matrix to be subtracted.
@param ddepth optional depth of the output matrix.
@sa  add, addC, subC
  */
GAPI_EXPORTS GMat subRC(const GScalar& c, const GMat& src, int ddepth = -1);
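// Illustrative sketch (not part of the original header): inverting an 8-bit
// image by subtracting it from the scalar 255, i.e. dst(I) = 255 - src(I).
//
//     cv::GMat src;
//     cv::GMat inverted = cv::gapi::subRC(cv::GScalar(255), src);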

/** @brief Calculates the per-element scaled product of two matrices.

The function mul calculates the per-element product of two matrices:

\f[\texttt{dst} (I)= \texttt{saturate} ( \texttt{scale} \cdot \texttt{src1} (I)  \cdot \texttt{src2} (I))\f]

If src1.depth() == src2.depth(), ddepth can be set to the default -1. In this case, the output matrix will have
the same depth as the input matrices. The matrices can be single or multi channel.
Output matrix must have the same size as input matrices.

Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.math.mul"
@param src1 first input matrix.
@param src2 second input matrix of the same size and the same depth as src1.
@param scale optional scale factor.
@param ddepth optional depth of the output matrix.
@sa add, sub, div, addWeighted
*/
GAPI_EXPORTS GMat mul(const GMat& src1, const GMat& src2, double scale = 1.0, int ddepth = -1);
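// Illustrative sketch (not part of the original header): per-element product
// halved via the scale factor, keeping the input depth.
//
//     cv::GMat a, b;
//     cv::GMat prod = cv::gapi::mul(a, b, 0.5);           // scale = 0.5, ddepth = -1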

/** @brief Multiplies matrix by scalar.

The function mulC multiplies each element of matrix src by a given scalar value:

\f[\texttt{dst} (I)= \texttt{saturate} (  \texttt{src} (I)  \cdot \texttt{multiplier} )\f]

The matrices can be single or multi channel. Output matrix must have the same size as src.

Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.math.mulC"
@param src input matrix.
@param multiplier factor by which the matrix elements are multiplied.
@param ddepth optional depth of the output matrix. If -1, the depth of output matrix will be the same as input matrix depth.
@sa add, sub, div, addWeighted
*/
GAPI_EXPORTS GMat mulC(const GMat& src, double multiplier, int ddepth = -1);
//! @overload
GAPI_EXPORTS GMat mulC(const GMat& src, const GScalar& multiplier, int ddepth = -1);   // FIXME: merge with mulc
//! @overload
GAPI_EXPORTS GMat mulC(const GScalar& multiplier, const GMat& src, int ddepth = -1);   // FIXME: merge with mulc

/** @brief Performs per-element division of two matrices.

The function divides one matrix by another:
\f[\texttt{dst(I) = saturate(src1(I)*scale/src2(I))}\f]

When src2(I) is zero, dst(I) will also be zero. Different channels of
multi-channel matrices are processed independently.
The matrices can be single or multi channel. Output matrix must have the same size and depth as src.

Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.math.div"
@param src1 first input matrix.
@param src2 second input matrix of the same size and depth as src1.
@param scale scalar factor.
@param ddepth optional depth of the output matrix; you can only pass -1 when src1.depth() == src2.depth().
@sa  mul, add, sub
*/
GAPI_EXPORTS GMat div(const GMat& src1, const GMat& src2, double scale, int ddepth = -1);
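// Illustrative sketch (not part of the original header). Note that here the
// scale argument has no default value and must be passed explicitly.
//
//     cv::GMat num, den;
//     cv::GMat ratio = cv::gapi::div(num, den, 1.0);      // zeros where den(I) == 0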

/** @brief Divides matrix by scalar.

The function divC divides each element of matrix src by given scalar value:

\f[\texttt{dst(I) = saturate(src(I)*scale/divisor)}\f]

When divisor is zero, dst(I) will also be zero. Different channels of
multi-channel matrices are processed independently.
The matrices can be single or multi channel. Output matrix must have the same size and depth as src.

Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.math.divC"
@param src input matrix.
@param divisor number to be divided by.
@param ddepth optional depth of the output matrix. If -1, the depth of output matrix will be the same as input matrix depth.
@param scale scale factor.
@sa add, sub, div, addWeighted
*/
GAPI_EXPORTS GMat divC(const GMat& src, const GScalar& divisor, double scale, int ddepth = -1);

/** @brief Divides scalar by matrix.

The function divRC divides a given scalar by each element of matrix src and keeps the result in a new matrix of the same size and type as src:

\f[\texttt{dst(I) = saturate(divident*scale/src(I))}\f]

When src(I) is zero, dst(I) will also be zero. Different channels of
multi-channel matrices are processed independently.
The matrices can be single or multi channel. Output matrix must have the same size and depth as src.

Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.math.divRC"
@param src input matrix.
@param divident number to be divided.
@param ddepth optional depth of the output matrix. If -1, the depth of output matrix will be the same as input matrix depth.
@param scale scale factor
@sa add, sub, div, addWeighted
*/
GAPI_EXPORTS GMat divRC(const GScalar& divident, const GMat& src, double scale, int ddepth = -1);

/** @brief Applies a mask to a matrix.

The function mask keeps the value from the given matrix wherever the corresponding pixel of the mask matrix is non-zero,
and sets the output value to 0 otherwise.

Supported src matrix data types are @ref CV_8UC1, @ref CV_16SC1, @ref CV_16UC1. Supported mask data type is @ref CV_8UC1.

@note Function textual ID is "org.opencv.core.pixelwise.mask"
@param src input matrix.
@param mask input mask matrix.
*/
GAPI_EXPORTS GMat mask(const GMat& src, const GMat& mask);
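// Illustrative sketch (not part of the original header): keep only the pixels
// selected by a CV_8UC1 mask, zeroing everything else.
//
//     cv::GMat src, m;                                    // m is expected to be CV_8UC1
//     cv::GMat masked = cv::gapi::mask(src, m);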

/** @brief Calculates an average (mean) of matrix elements.

The function mean calculates the mean value M of matrix elements,
independently for each channel, and returns it.

Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.math.mean"
@param src input matrix.
@sa  countNonZero, min, max
*/
GAPI_EXPORTS_W GScalar mean(const GMat& src);
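// Illustrative sketch (not part of the original header): the result is a
// GScalar, so running the graph yields a cv::Scalar with one mean per channel.
//
//     cv::GMat in;
//     cv::GScalar m = cv::gapi::mean(in);
//     cv::GComputation graph(cv::GIn(in), cv::GOut(m));
//
//     cv::Mat img(4, 4, CV_8UC3, cv::Scalar(10, 20, 30));
//     cv::Scalar avg;
//     graph.apply(cv::gin(img), cv::gout(avg));           // avg is about (10, 20, 30, 0)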

/** @brief Calculates x and y coordinates of 2D vectors from their magnitude and angle.

The function polarToCart calculates the Cartesian coordinates of each 2D
vector represented by the corresponding elements of magnitude and angle:
\f[\begin{array}{l} \texttt{x} (I) =  \texttt{magnitude} (I) \cos ( \texttt{angle} (I)) \\ \texttt{y} (I) =  \texttt{magnitude} (I) \sin ( \texttt{angle} (I)) \\ \end{array}\f]

The relative accuracy of the estimated coordinates is about 1e-6.

First output is a matrix of x-coordinates of 2D vectors.
Second output is a matrix of y-coordinates of 2D vectors.
Both outputs have the same size and depth as the input matrices.

@note Function textual ID is "org.opencv.core.math.polarToCart"

@param magnitude input floating-point @ref CV_32FC1 matrix (1xN) of magnitudes of 2D vectors;
@param angle input floating-point @ref CV_32FC1 matrix (1xN) of angles of 2D vectors.
@param angleInDegrees when true, the input angles are measured in
degrees, otherwise, they are measured in radians.
@sa cartToPolar, exp, log, pow, sqrt
*/
GAPI_EXPORTS std::tuple<GMat, GMat> polarToCart(const GMat& magnitude, const GMat& angle,
                                              bool angleInDegrees = false);
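// Illustrative sketch (not part of the original header): the function returns
// a std::tuple of two GMat objects, typically unpacked with std::tie.
//
//     cv::GMat mag, ang, x, y;
//     std::tie(x, y) = cv::gapi::polarToCart(mag, ang, /*angleInDegrees=*/true);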

/** @brief Calculates the magnitude and angle of 2D vectors.

The function cartToPolar calculates either the magnitude, angle, or both
for every 2D vector (x(I),y(I)):
\f[\begin{array}{l} \texttt{magnitude} (I)= \sqrt{\texttt{x}(I)^2+\texttt{y}(I)^2} , \\ \texttt{angle} (I)= \texttt{atan2} ( \texttt{y} (I), \texttt{x} (I))[ \cdot180 / \pi ] \end{array}\f]

The angles are calculated with accuracy about 0.3 degrees. For the point
(0,0), the angle is set to 0.

First output is a matrix of magnitudes of the same size and depth as input x.
Second output is a matrix of angles that has the same size and depth as
x; the angles are measured in radians (from 0 to 2\*Pi) or in degrees (0 to 360 degrees).

@note Function textual ID is "org.opencv.core.math.cartToPolar"

@param x matrix of @ref CV_32FC1 x-coordinates.
@param y array of @ref CV_32FC1 y-coordinates.
@param angleInDegrees a flag, indicating whether the angles are measured
in radians (which is by default), or in degrees.
@sa polarToCart
*/
GAPI_EXPORTS std::tuple<GMat, GMat> cartToPolar(const GMat& x, const GMat& y,
                                              bool angleInDegrees = false);

/** @brief Calculates the rotation angle of 2D vectors.

The function cv::phase calculates the rotation angle of each 2D vector that
is formed from the corresponding elements of x and y :
\f[\texttt{angle} (I) =  \texttt{atan2} ( \texttt{y} (I), \texttt{x} (I))\f]

The angle estimation accuracy is about 0.3 degrees. When x(I)=y(I)=0 ,
the corresponding angle(I) is set to 0.
@param x input floating-point array of x-coordinates of 2D vectors.
@param y input array of y-coordinates of 2D vectors; it must have the
same size and the same type as x.
@param angleInDegrees when true, the function calculates the angle in
degrees, otherwise, they are measured in radians.
@return array of vector angles; it has the same size and same type as x.
*/
GAPI_EXPORTS GMat phase(const GMat& x, const GMat &y, bool angleInDegrees = false);

/** @brief Calculates a square root of array elements.

The function cv::gapi::sqrt calculates a square root of each input array element.
In case of multi-channel arrays, each channel is processed
independently. The accuracy is approximately the same as of the built-in
std::sqrt .
@param src input floating-point array.
@return output array of the same size and type as src.
*/
GAPI_EXPORTS GMat sqrt(const GMat &src);

//! @} gapi_math
//!
//! @addtogroup gapi_pixelwise
//! @{

/** @brief Performs the per-element comparison of two matrices checking if elements from the first matrix are greater than elements in the second.

The function compares elements of two matrices src1 and src2 of the same size:
    \f[\texttt{dst} (I) =  \texttt{src1} (I)  > \texttt{src2} (I)\f]

When the comparison result is true, the corresponding element of output
array is set to 255. The comparison operations can be replaced with the
equivalent matrix expressions:
\f[\texttt{dst} =   \texttt{src1} > \texttt{src2}\f]

Output matrix of depth @ref CV_8U must have the same size and the same number of channels as
    the input matrices/matrix.

Supported input matrix data types are @ref CV_8UC1, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpGT"
@param src1 first input matrix.
@param src2 second input matrix/scalar of the same depth as first input matrix.
@sa min, max, threshold, cmpLE, cmpGE, cmpLT
*/
GAPI_EXPORTS GMat cmpGT(const GMat& src1, const GMat& src2);
/** @overload
@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpGTScalar"
*/
GAPI_EXPORTS GMat cmpGT(const GMat& src1, const GScalar& src2);
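// Illustrative sketch (not part of the original header): both overloads
// produce a CV_8U mask with 255 where the condition holds and 0 elsewhere.
//
//     cv::GMat a, b;
//     cv::GMat mask_mm = cv::gapi::cmpGT(a, b);                   // matrix vs matrix
//     cv::GMat mask_ms = cv::gapi::cmpGT(a, cv::GScalar(128));    // matrix vs scalar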

/** @brief Performs the per-element comparison of two matrices checking if elements from the first matrix are less than elements in the second.

The function compares elements of two matrices src1 and src2 of the same size:
    \f[\texttt{dst} (I) =  \texttt{src1} (I)  < \texttt{src2} (I)\f]

When the comparison result is true, the corresponding element of output
array is set to 255. The comparison operations can be replaced with the
equivalent matrix expressions:
    \f[\texttt{dst} =   \texttt{src1} < \texttt{src2}\f]

Output matrix of depth @ref CV_8U must have the same size and the same number of channels as
    the input matrices/matrix.

Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpLT"
@param src1 first input matrix.
@param src2 second input matrix/scalar of the same depth as first input matrix.
@sa min, max, threshold, cmpLE, cmpGE, cmpGT
*/
GAPI_EXPORTS GMat cmpLT(const GMat& src1, const GMat& src2);
/** @overload
@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpLTScalar"
*/
GAPI_EXPORTS GMat cmpLT(const GMat& src1, const GScalar& src2);

/** @brief Performs the per-element comparison of two matrices checking if elements from the first matrix are greater than or equal to elements in the second.

The function compares elements of two matrices src1 and src2 of the same size:
    \f[\texttt{dst} (I) =  \texttt{src1} (I)  >= \texttt{src2} (I)\f]

When the comparison result is true, the corresponding element of output
array is set to 255. The comparison operations can be replaced with the
equivalent matrix expressions:
    \f[\texttt{dst} =   \texttt{src1} >= \texttt{src2}\f]

Output matrix of depth @ref CV_8U must have the same size and the same number of channels as
    the input matrices.

Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpGE"
@param src1 first input matrix.
@param src2 second input matrix/scalar of the same depth as first input matrix.
@sa min, max, threshold, cmpLE, cmpGT, cmpLT
*/
GAPI_EXPORTS GMat cmpGE(const GMat& src1, const GMat& src2);
/** @overload
@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpGEScalar"
*/
GAPI_EXPORTS GMat cmpGE(const GMat& src1, const GScalar& src2);

/** @brief Performs the per-element comparison of two matrices checking if elements from the first matrix are less than or equal to elements in the second.

The function compares elements of two matrices src1 and src2 of the same size:
    \f[\texttt{dst} (I) =  \texttt{src1} (I)  <=  \texttt{src2} (I)\f]

When the comparison result is true, the corresponding element of output
array is set to 255. The comparison operations can be replaced with the
equivalent matrix expressions:
    \f[\texttt{dst} =   \texttt{src1} <= \texttt{src2}\f]

Output matrix of depth @ref CV_8U must have the same size and the same number of channels as
    the input matrices.

Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpLE"
@param src1 first input matrix.
@param src2 second input matrix/scalar of the same depth as first input matrix.
@sa min, max, threshold, cmpGT, cmpGE, cmpLT
*/
GAPI_EXPORTS GMat cmpLE(const GMat& src1, const GMat& src2);
/** @overload
@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpLEScalar"
*/
GAPI_EXPORTS GMat cmpLE(const GMat& src1, const GScalar& src2);

/** @brief Performs the per-element comparison of two matrices checking if elements from the first matrix are equal to elements in the second.

The function compares elements of two matrices src1 and src2 of the same size:
    \f[\texttt{dst} (I) =  \texttt{src1} (I)  ==  \texttt{src2} (I)\f]

When the comparison result is true, the corresponding element of output
array is set to 255. The comparison operations can be replaced with the
equivalent matrix expressions:
    \f[\texttt{dst} =   \texttt{src1} == \texttt{src2}\f]

Output matrix of depth @ref CV_8U must have the same size and the same number of channels as
    the input matrices.

Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpEQ"
@param src1 first input matrix.
@param src2 second input matrix/scalar of the same depth as first input matrix.
@sa min, max, threshold, cmpNE
*/
GAPI_EXPORTS GMat cmpEQ(const GMat& src1, const GMat& src2);
/** @overload
@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpEQScalar"
*/
GAPI_EXPORTS GMat cmpEQ(const GMat& src1, const GScalar& src2);

/** @brief Performs the per-element comparison of two matrices checking if elements from the first matrix are not equal to elements in the second.

The function compares elements of two matrices src1 and src2 of the same size:
    \f[\texttt{dst} (I) =  \texttt{src1} (I)  !=  \texttt{src2} (I)\f]

When the comparison result is true, the corresponding element of output
array is set to 255. The comparison operations can be replaced with the
equivalent matrix expressions:
    \f[\texttt{dst} =   \texttt{src1} != \texttt{src2}\f]

Output matrix of depth @ref CV_8U must have the same size and the same number of channels as
    the input matrices.

Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpNE"
@param src1 first input matrix.
@param src2 second input matrix/scalar of the same depth as first input matrix.
@sa min, max, threshold, cmpEQ
*/
GAPI_EXPORTS GMat cmpNE(const GMat& src1, const GMat& src2);
/** @overload
@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpNEScalar"
*/
GAPI_EXPORTS GMat cmpNE(const GMat& src1, const GScalar& src2);

/** @brief computes bitwise conjunction of the two matrices (src1 & src2)
Calculates the per-element bit-wise logical conjunction of two matrices of the same size.

In case of floating-point matrices, their machine-specific bit
representations (usually IEEE754-compliant) are used for the operation.
In case of multi-channel matrices, each channel is processed
independently. Output matrix must have the same size and depth as the input
matrices.

Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.pixelwise.bitwise_and"

@param src1 first input matrix.
@param src2 second input matrix.
*/
GAPI_EXPORTS GMat bitwise_and(const GMat& src1, const GMat& src2);
/** @overload
@note Function textual ID is "org.opencv.core.pixelwise.bitwise_andS"
@param src1 first input matrix.
@param src2 scalar, which is bitwise AND-ed with each element of src1.
*/
GAPI_EXPORTS GMat bitwise_and(const GMat& src1, const GScalar& src2);
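// Illustrative sketch (not part of the original header): AND-ing a matrix with
// another matrix and with a scalar constant.
//
//     cv::GMat a, b;
//     cv::GMat both   = cv::gapi::bitwise_and(a, b);
//     cv::GMat lowbit = cv::gapi::bitwise_and(a, cv::GScalar(0x0F));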

/** @brief computes bitwise disjunction of the two matrices (src1 | src2)
Calculates the per-element bit-wise logical disjunction of two matrices of the same size.

In case of floating-point matrices, their machine-specific bit
representations (usually IEEE754-compliant) are used for the operation.
In case of multi-channel matrices, each channel is processed
independently. Output matrix must have the same size and depth as the input
matrices.

Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.pixelwise.bitwise_or"

@param src1 first input matrix.
@param src2 second input matrix.
*/
GAPI_EXPORTS GMat bitwise_or(const GMat& src1, const GMat& src2);
/** @overload
@note Function textual ID is "org.opencv.core.pixelwise.bitwise_orS"
@param src1 first input matrix.
@param src2 scalar, which is bitwise OR-ed with each element of src1.
*/
GAPI_EXPORTS GMat bitwise_or(const GMat& src1, const GScalar& src2);


/** @brief computes bitwise logical "exclusive or" of the two matrices (src1 ^ src2)
Calculates the per-element bit-wise logical "exclusive or" of two matrices of the same size.

In case of floating-point matrices, their machine-specific bit
representations (usually IEEE754-compliant) are used for the operation.
In case of multi-channel matrices, each channel is processed
independently. Output matrix must have the same size and depth as the input
matrices.

Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.pixelwise.bitwise_xor"

@param src1 first input matrix.
@param src2 second input matrix.
*/
GAPI_EXPORTS GMat bitwise_xor(const GMat& src1, const GMat& src2);
/** @overload
@note Function textual ID is "org.opencv.core.pixelwise.bitwise_xorS"
@param src1 first input matrix.
@param src2 scalar, with which a per-element bit-wise "exclusive or" operation on elements of src1 is performed.
*/
GAPI_EXPORTS GMat bitwise_xor(const GMat& src1, const GScalar& src2);


/** @brief Inverts every bit of an array.

The function bitwise_not calculates per-element bit-wise inversion of the input
matrix:
\f[\texttt{dst} (I) =  \neg \texttt{src} (I)\f]

In case of floating-point matrices, their machine-specific bit
representations (usually IEEE754-compliant) are used for the operation.
In case of multi-channel matrices, each channel is processed
independently. Output matrix must have the same size and depth as the input
matrix.

Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.pixelwise.bitwise_not"

@param src input matrix.
*/
GAPI_EXPORTS GMat bitwise_not(const GMat& src);

/** @brief Selects values from either the first or the second input matrix by the given mask.
The function sets each element of the output matrix to the value from the first input matrix
if the corresponding value of the mask matrix is 255, or to the value from the second input
matrix if the mask value is 0.

Input mask matrix must be of @ref CV_8UC1 type, the two other input matrices and the output matrix
should be of the same type. The size should be the same for all input and output matrices.
Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
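
A minimal sketch of the operation in a graph (illustrative only; assumes CV_8UC1 inputs and a
0/255-valued mask):
@code{.cpp}
    cv::GMat a, b, mask;
    cv::GMat out = cv::gapi::select(a, b, mask);       // out(I) = (mask(I) == 255) ? a(I) : b(I)
@endcode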

@note Function textual ID is "org.opencv.core.pixelwise.select"

@param src1 first input matrix.
@param src2 second input matrix.
@param mask mask input matrix.
*/
GAPI_EXPORTS GMat select(const GMat& src1, const GMat& src2, const GMat& mask);

//! @} gapi_pixelwise


//! @addtogroup gapi_matrixop
//! @{
/** @brief Calculates per-element minimum of two matrices.

The function min calculates the per-element minimum of two matrices of the same size, number of channels and depth:
\f[\texttt{dst} (I)= \min ( \texttt{src1} (I), \texttt{src2} (I))\f]
    where I is a multi-dimensional index of matrix elements. In case of
    multi-channel matrices, each channel is processed independently.
Output matrix must be of the same size and depth as src1.

Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.matrixop.min"
@param src1 first input matrix.
@param src2 second input matrix of the same size and depth as src1.
@sa max, cmpEQ, cmpLT, cmpLE
*/
GAPI_EXPORTS GMat min(const GMat& src1, const GMat& src2);

/** @brief Calculates per-element maximum of two matrices.

The function max calculates the per-element maximum of two matrices of the same size, number of channels and depth:
\f[\texttt{dst} (I)= \max ( \texttt{src1} (I), \texttt{src2} (I))\f]
    where I is a multi-dimensional index of matrix elements. In case of
    multi-channel matrices, each channel is processed independently.
Output matrix must be of the same size and depth as src1.

Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.matrixop.max"
@param src1 first input matrix.
@param src2 second input matrix of the same size and depth as src1.
@sa min, compare, cmpEQ, cmpGT, cmpGE
*/
GAPI_EXPORTS GMat max(const GMat& src1, const GMat& src2);

/** @brief Calculates the per-element absolute difference between two matrices.

The function absDiff calculates absolute difference between two matrices of the same size and depth:
    \f[\texttt{dst}(I) =  \texttt{saturate} (| \texttt{src1}(I) -  \texttt{src2}(I)|)\f]
    where I is a multi-dimensional index of matrix elements. In case of
    multi-channel matrices, each channel is processed independently.
Output matrix must have the same size and depth as input matrices.

Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.matrixop.absdiff"
@param src1 first input matrix.
@param src2 second input matrix.
@sa abs
*/
GAPI_EXPORTS GMat absDiff(const GMat& src1, const GMat& src2);

/** @brief Calculates the absolute difference between matrix elements and a given scalar.

The function absDiffC calculates the absolute difference between the elements of a matrix and a given scalar value:
    \f[\texttt{dst}(I) =  \texttt{saturate} (| \texttt{src}(I) -  \texttt{matC}(I)|)\f]
    where matC is constructed from the given scalar c and has the same size and depth as the input matrix src.

Output matrix must be of the same size and depth as src.

Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.matrixop.absdiffC"
@param src input matrix.
@param c scalar to be subtracted.
@sa min, max
*/
GAPI_EXPORTS GMat absDiffC(const GMat& src, const GScalar& c);

/** @brief Calculates sum of all matrix elements.

The function sum calculates the sum of all matrix elements, independently for each channel.

Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.matrixop.sum"
@param src input matrix.
@sa countNonZero, mean, min, max
*/
GAPI_EXPORTS GScalar sum(const GMat& src);

/** @brief Counts non-zero array elements.

The function returns the number of non-zero elements in src:
\f[\sum _{I: \; \texttt{src} (I) \ne0 } 1\f]

Supported matrix data types are @ref CV_8UC1, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.matrixop.countNonZero"
@param src input single-channel matrix.
@sa  mean, min, max
*/
GAPI_EXPORTS GOpaque<int> countNonZero(const GMat& src);

/** @brief Calculates the weighted sum of two matrices.

The function addWeighted calculates the weighted sum of two matrices as follows:
\f[\texttt{dst} (I)= \texttt{saturate} ( \texttt{src1} (I)* \texttt{alpha} +  \texttt{src2} (I)* \texttt{beta} +  \texttt{gamma} )\f]
where I is a multi-dimensional index of array elements. In case of multi-channel matrices, each
channel is processed independently.

The function can be replaced with a matrix expression:
    \f[\texttt{dst}(I) =  \texttt{alpha} * \texttt{src1}(I) + \texttt{beta} * \texttt{src2}(I) + \texttt{gamma} \f]

Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
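
For example, two images can be blended and the result retrieved as follows (an illustrative
end-to-end sketch; the input matrices below are placeholders):
@code{.cpp}
    cv::GMat a, b;
    cv::GMat blended = cv::gapi::addWeighted(a, 0.7, b, 0.3, 0.0);
    cv::GComputation comp(cv::GIn(a, b), cv::GOut(blended));

    cv::Mat img1(480, 640, CV_8UC1, cv::Scalar(64));   // placeholder inputs
    cv::Mat img2(480, 640, CV_8UC1, cv::Scalar(192));
    cv::Mat out;
    comp.apply(cv::gin(img1, img2), cv::gout(out));    // out ~ 0.7*img1 + 0.3*img2
@endcode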

@note Function textual ID is "org.opencv.core.matrixop.addweighted"
@param src1 first input matrix.
@param alpha weight of the first matrix elements.
@param src2 second input matrix of the same size and channel number as src1.
@param beta weight of the second matrix elements.
@param gamma scalar added to each sum.
@param ddepth optional depth of the output matrix.
@sa  add, sub
*/
GAPI_EXPORTS GMat addWeighted(const GMat& src1, double alpha, const GMat& src2, double beta, double gamma, int ddepth = -1);

/** @brief Calculates the absolute L1 norm of a matrix.

This version of normL1 calculates the absolute L1 norm of src.

As an example, for one array, consider the function \f$r(x)= \begin{pmatrix} x \\ 1-x \end{pmatrix}, x \in [-1;1]\f$.
The \f$ L_{1} \f$ norm for the sample value \f$r(-1) = \begin{pmatrix} -1 \\ 2 \end{pmatrix}\f$
is calculated as follows
\f{align*}
    \| r(-1) \|_{L_1} &= |-1| + |2| = 3 \\
\f}
and for \f$r(0.5) = \begin{pmatrix} 0.5 \\ 0.5 \end{pmatrix}\f$ the calculation is
\f{align*}
    \| r(0.5) \|_{L_1} &= |0.5| + |0.5| = 1 \\
\f}

Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
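
A minimal sketch of retrieving the norm value (illustrative only):
@code{.cpp}
    cv::GMat in;
    cv::GScalar n = cv::gapi::normL1(in);
    cv::GComputation comp(cv::GIn(in), cv::GOut(n));

    cv::Mat m = cv::Mat::ones(4, 4, CV_8UC1);
    cv::Scalar result;
    comp.apply(cv::gin(m), cv::gout(result));          // result[0] == 16 for this input
@endcode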

@note Function textual ID is "org.opencv.core.matrixop.norml1"
@param src input matrix.
@sa normL2, normInf
*/
GAPI_EXPORTS GScalar normL1(const GMat& src);

/** @brief Calculates the absolute L2 norm of a matrix.

This version of normL2 calculates the absolute L2 norm of src.

As an example, for one array, consider the function \f$r(x)= \begin{pmatrix} x \\ 1-x \end{pmatrix}, x \in [-1;1]\f$.
The \f$ L_{2} \f$  norm for the sample value \f$r(-1) = \begin{pmatrix} -1 \\ 2 \end{pmatrix}\f$
is calculated as follows
\f{align*}
    \| r(-1) \|_{L_2} &= \sqrt{(-1)^{2} + (2)^{2}} = \sqrt{5} \\
\f}
and for \f$r(0.5) = \begin{pmatrix} 0.5 \\ 0.5 \end{pmatrix}\f$ the calculation is
\f{align*}
    \| r(0.5) \|_{L_2} &= \sqrt{(0.5)^{2} + (0.5)^{2}} = \sqrt{0.5} \\
\f}

Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
@note Function textual ID is "org.opencv.core.matrixop.norml2"
@param src input matrix.
@sa normL1, normInf
*/
GAPI_EXPORTS GScalar normL2(const GMat& src);

/** @brief Calculates the absolute infinity norm of a matrix.

This version of normInf calculates the absolute infinity norm of src.

As an example, for one array, consider the function \f$r(x)= \begin{pmatrix} x \\ 1-x \end{pmatrix}, x \in [-1;1]\f$.
The \f$ L_{\infty} \f$ norm for the sample value \f$r(-1) = \begin{pmatrix} -1 \\ 2 \end{pmatrix}\f$
is calculated as follows
\f{align*}
    \| r(-1) \|_{L_\infty} &= \max(|-1|,|2|) = 2
\f}
and for \f$r(0.5) = \begin{pmatrix} 0.5 \\ 0.5 \end{pmatrix}\f$ the calculation is
\f{align*}
    \| r(0.5) \|_{L_\infty} &= \max(|0.5|,|0.5|) = 0.5.
\f}

Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.core.matrixop.norminf"
@param src input matrix.
@sa normL1, normL2
*/
GAPI_EXPORTS GScalar normInf(const GMat& src);

/** @brief Calculates the integral of an image.

The function calculates one or more integral images for the source image as follows:

\f[\texttt{sum} (X,Y) =  \sum _{x<X,y<Y}  \texttt{image} (x,y)\f]

\f[\texttt{sqsum} (X,Y) =  \sum _{x<X,y<Y}  \texttt{image} (x,y)^2\f]

The function returns the integral image as a \f$(W+1)\times (H+1)\f$, 32-bit integer or floating-point (32f or 64f) array, and
the integral image for squared pixel values as a \f$(W+1)\times (H+1)\f$, double-precision floating-point (64f) array.
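
Since the operation has two outputs, std::tie can be used to unpack them when building a graph
(an illustrative sketch):
@code{.cpp}
    cv::GMat in;
    cv::GMat s, sq;
    std::tie(s, sq) = cv::gapi::integral(in, CV_32S, CV_64F);
    cv::GComputation comp(cv::GIn(in), cv::GOut(s, sq));
@endcode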

@note Function textual ID is "org.opencv.core.matrixop.integral"

@param src input image.
@param sdepth desired depth of the integral image, CV_32S, CV_32F, or
CV_64F.
@param sqdepth desired depth of the integral image of squared pixel values, CV_32F or CV_64F.
 */
GAPI_EXPORTS std::tuple<GMat, GMat> integral(const GMat& src, int sdepth = -1, int sqdepth = -1);

/** @brief Applies a fixed-level threshold to each matrix element.

The function applies fixed-level thresholding to a single- or multiple-channel matrix.
The function is typically used to get a bi-level (binary) image out of a grayscale image (the cmp functions could also be used for
this purpose) or for removing noise, that is, filtering out pixels with too small or too large
values. There are several types of thresholding supported by the function; they are determined by
the type parameter.

Also, the special values cv::THRESH_OTSU or cv::THRESH_TRIANGLE may be combined with one of the
above values. In these cases, the function determines the optimal threshold value using the Otsu
or Triangle algorithm and uses it instead of the specified thresh. The function returns the
computed threshold value in addition to the thresholded matrix.
The Otsu and Triangle methods are implemented only for 8-bit matrices.

The input image should be single-channel when the cv::THRESH_OTSU or cv::THRESH_TRIANGLE flag is used.
Output matrix must be of the same size and depth as src.
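
For example, a grayscale image can be binarized with a fixed or an automatically computed
threshold (an illustrative sketch; the input is assumed to be CV_8UC1):
@code{.cpp}
    cv::GMat in;
    cv::GMat bin = cv::gapi::threshold(in, 128, 255, cv::THRESH_BINARY);

    // Otsu variant: the computed threshold value is returned as well
    cv::GMat binOtsu; cv::GScalar otsuThresh;
    std::tie(binOtsu, otsuThresh) =
        cv::gapi::threshold(in, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
@endcode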

@note Function textual ID is "org.opencv.core.matrixop.threshold"

@param src input matrix (@ref CV_8UC1, @ref CV_8UC3, or @ref CV_32FC1).
@param thresh threshold value.
@param maxval maximum value to use with the cv::THRESH_BINARY and cv::THRESH_BINARY_INV thresholding
types.
@param type thresholding type (see the cv::ThresholdTypes).

@sa min, max, cmpGT, cmpLE, cmpGE, cmpLT
 */
GAPI_EXPORTS GMat threshold(const GMat& src, const GScalar& thresh, const GScalar& maxval, int type);
/** @overload
This overload is applicable only when the threshold type includes cv::THRESH_OTSU or cv::THRESH_TRIANGLE;
the computed threshold value is returned in addition to the thresholded matrix.
@note Function textual ID is "org.opencv.core.matrixop.thresholdOT"
*/
GAPI_EXPORTS_W std::tuple<GMat, GScalar> threshold(const GMat& src, const GScalar& maxval, int type);

/** @brief Applies a range-level threshold to each matrix element.

The function applies range-level thresholding to a single- or multiple-channel matrix.
It sets the output pixel value to 0xFF if the corresponding pixel value of the input matrix is in the specified range, or to 0 otherwise.

Input and output matrices must be CV_8UC1.
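
A minimal sketch (illustrative only; keeps pixels with values in [100; 200]):
@code{.cpp}
    cv::GMat in;                                       // CV_8UC1
    cv::GMat band = cv::gapi::inRange(in, 100, 200);   // 255 inside the range, 0 outside
@endcode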

@note Function textual ID is "org.opencv.core.matrixop.inRange"

@param src input matrix (CV_8UC1).
@param threshLow lower boundary value.
@param threshUp upper boundary value.

@sa threshold
 */
GAPI_EXPORTS GMat inRange(const GMat& src, const GScalar& threshLow, const GScalar& threshUp);

//! @} gapi_matrixop

//! @addtogroup gapi_transform
//! @{
/** @brief Resizes an image.

The function resizes the image src down to or up to the specified size.

Output image size will have the size dsize (when dsize is non-zero) or the size computed from
src.size(), fx, and fy; the depth of output is the same as of src.

If you want to resize src to a specific pre-computed size, you may call the function as follows:
@code
    // explicitly specify dsize; fx and fy will be computed from that.
    dst = gapi::resize(src, dsize, 0, 0, interpolation);
@endcode
If you want to decimate the image by factor of 2 in each direction, you can call the function this
way:
@code
    // specify fx and fy and let the function compute the destination image size.
    dst = gapi::resize(src, Size(), 0.5, 0.5, interpolation);
@endcode
To shrink an image, it will generally look best with cv::INTER_AREA interpolation, whereas to
enlarge an image, it will generally look best with cv::INTER_CUBIC (slow) or cv::INTER_LINEAR
(faster but still looks OK).

@note Function textual ID is "org.opencv.core.transform.resize"

@param src input image.
@param dsize output image size; if it equals zero, it is computed as:
 \f[\texttt{dsize = Size(round(fx*src.cols), round(fy*src.rows))}\f]
 Either dsize or both fx and fy must be non-zero.
@param fx scale factor along the horizontal axis; when it equals 0, it is computed as
\f[\texttt{(double)dsize.width/src.cols}\f]
@param fy scale factor along the vertical axis; when it equals 0, it is computed as
\f[\texttt{(double)dsize.height/src.rows}\f]
@param interpolation interpolation method, see cv::InterpolationFlags

@sa  warpAffine, warpPerspective, remap, resizeP
 */
GAPI_EXPORTS GMat resize(const GMat& src, const Size& dsize, double fx = 0, double fy = 0, int interpolation = INTER_LINEAR);

/** @brief Resizes a planar image.

The function resizes the image src down to or up to the specified size.
Planar image memory layout is three planes lying in memory contiguously,
so the image height should be plane_height*plane_number, and the image type is @ref CV_8UC1.

Output image size will have the size dsize, the depth of output is the same as of src.

@note Function textual ID is "org.opencv.core.transform.resizeP"

@param src input image, must be of @ref CV_8UC1 type;
@param dsize output image size;
@param interpolation interpolation method, only cv::INTER_LINEAR is supported at the moment

@sa  warpAffine, warpPerspective, remap, resize
 */
GAPI_EXPORTS GMatP resizeP(const GMatP& src, const Size& dsize, int interpolation = cv::INTER_LINEAR);

/** @brief Creates one 4-channel matrix out of 4 single-channel ones.

The function merges several matrices to make a single multi-channel matrix. That is, each
element of the output matrix will be a concatenation of the corresponding elements of the input
matrices, so that the i-th input matrix fills the i-th channel of the output.
Output matrix must be of @ref CV_8UC4 type.

The function split4 does the reverse operation.

@note
 - Function textual ID is "org.opencv.core.transform.merge4"

@param src1 first input @ref CV_8UC1 matrix to be merged.
@param src2 second input @ref CV_8UC1 matrix to be merged.
@param src3 third input @ref CV_8UC1 matrix to be merged.
@param src4 fourth input @ref CV_8UC1 matrix to be merged.
@sa merge3, split4, split3
*/
GAPI_EXPORTS GMat merge4(const GMat& src1, const GMat& src2, const GMat& src3, const GMat& src4);

/** @brief Creates one 3-channel matrix out of 3 single-channel ones.

The function merges several matrices to make a single multi-channel matrix. That is, each
element of the output matrix will be a concatenation of the corresponding elements of the input
matrices, so that the i-th input matrix fills the i-th channel of the output.
Output matrix must be of @ref CV_8UC3 type.

The function split3 does the reverse operation.

@note
 - Function textual ID is "org.opencv.core.transform.merge3"

@param src1 first input @ref CV_8UC1 matrix to be merged.
@param src2 second input @ref CV_8UC1 matrix to be merged.
@param src3 third input @ref CV_8UC1 matrix to be merged.
@sa merge4, split4, split3
*/
GAPI_EXPORTS GMat merge3(const GMat& src1, const GMat& src2, const GMat& src3);

/** @brief Divides a 4-channel matrix into 4 single-channel matrices.

The function splits a 4-channel matrix into 4 single-channel matrices:
\f[\texttt{mv} [c](I) =  \texttt{src} (I)_c\f]

All output matrices must be of @ref CV_8UC1 type.

The function merge4 does the reverse operation.

@note
 - Function textual ID is "org.opencv.core.transform.split4"

@param src input @ref CV_8UC4 matrix.
@sa split3, merge3, merge4
*/
GAPI_EXPORTS std::tuple<GMat, GMat, GMat,GMat> split4(const GMat& src);

/** @brief Divides a 3-channel matrix into 3 single-channel matrices.

The function splits a 3-channel matrix into 3 single-channel matrices:
\f[\texttt{mv} [c](I) =  \texttt{src} (I)_c\f]

All output matrices must be of @ref CV_8UC1 type.

The function merge3 does the reverse operation.
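
For example, channels of a BGR image can be reordered using split3 together with merge3
(an illustrative sketch):
@code{.cpp}
    cv::GMat in;                                       // CV_8UC3, e.g. a BGR image
    cv::GMat b, g, r;
    std::tie(b, g, r) = cv::gapi::split3(in);
    cv::GMat rgb = cv::gapi::merge3(r, g, b);          // swap the first and the last channels
@endcode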

@note
 - Function textual ID is "org.opencv.core.transform.split3"

@param src input @ref CV_8UC3 matrix.
@sa split4, merge3, merge4
*/
GAPI_EXPORTS_W std::tuple<GMat, GMat, GMat> split3(const GMat& src);

/** @brief Applies a generic geometrical transformation to an image.

The function remap transforms the source image using the specified map:

\f[\texttt{dst} (x,y) =  \texttt{src} (map_x(x,y),map_y(x,y))\f]

where values of pixels with non-integer coordinates are computed using one of available
interpolation methods. \f$map_x\f$ and \f$map_y\f$ can be encoded as separate floating-point maps
in \f$map_1\f$ and \f$map_2\f$ respectively, or interleaved floating-point maps of \f$(x,y)\f$ in
\f$map_1\f$, or fixed-point maps created by using convertMaps. The reason you might want to
convert from floating to fixed-point representations of a map is that they can yield much faster
(\~2x) remapping operations. In the converted case, \f$map_1\f$ contains pairs (cvFloor(x),
cvFloor(y)) and \f$map_2\f$ contains indices in a table of interpolation coefficients.
Output image must be of the same size and depth as input one.

@note
 - Function textual ID is "org.opencv.core.transform.remap"
 - Due to current implementation limitations, the size of the input and output images should be less than 32767x32767.

@param src Source image.
@param map1 The first map of either (x,y) points or just x values having the type CV_16SC2,
CV_32FC1, or CV_32FC2.
@param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map
if map1 is (x,y) points), respectively.
@param interpolation Interpolation method (see cv::InterpolationFlags). The methods #INTER_AREA
and #INTER_LINEAR_EXACT are not supported by this function.
@param borderMode Pixel extrapolation method (see cv::BorderTypes). When
borderMode=BORDER_TRANSPARENT, it means that the pixels in the destination image that
corresponds to the "outliers" in the source image are not modified by the function.
@param borderValue Value used in case of a constant border. By default, it is 0.
 */
GAPI_EXPORTS GMat remap(const GMat& src, const Mat& map1, const Mat& map2,
                      int interpolation, int borderMode = BORDER_CONSTANT,
                      const Scalar& borderValue = Scalar());

/** @brief Flips a 2D matrix around vertical, horizontal, or both axes.

The function flips the matrix in one of three different ways (row
and column indices are 0-based):
\f[\texttt{dst} _{ij} =
\left\{
\begin{array}{l l}
\texttt{src} _{\texttt{src.rows}-i-1,j} & if\;  \texttt{flipCode} = 0 \\
\texttt{src} _{i, \texttt{src.cols} -j-1} & if\;  \texttt{flipCode} > 0 \\
\texttt{src} _{ \texttt{src.rows} -i-1, \texttt{src.cols} -j-1} & if\; \texttt{flipCode} < 0 \\
\end{array}
\right.\f]
The example scenarios of using the function are the following:
*   Vertical flipping of the image (flipCode == 0) to switch between
    top-left and bottom-left image origin. This is a typical operation
    in video processing on Microsoft Windows\* OS.
*   Horizontal flipping of the image with the subsequent horizontal
    shift and absolute difference calculation to check for a
    vertical-axis symmetry (flipCode \> 0).
*   Simultaneous horizontal and vertical flipping of the image with
    the subsequent shift and absolute difference calculation to check
    for a central symmetry (flipCode \< 0).
*   Reversing the order of point arrays (flipCode \> 0 or
    flipCode == 0).
Output image must be of the same depth as the input one; the size should be correct for the given flipCode.

@note Function textual ID is "org.opencv.core.transform.flip"

@param src input matrix.
@param flipCode a flag to specify how to flip the array; 0 means
flipping around the x-axis and positive value (for example, 1) means
flipping around y-axis. Negative value (for example, -1) means flipping
around both axes.
@sa remap
*/
GAPI_EXPORTS GMat flip(const GMat& src, int flipCode);

/** @brief Crops a 2D matrix.

The function crops the matrix by the given cv::Rect.

Output matrix must be of the same depth as the input one, and its size is specified by the given rect size.

@note Function textual ID is "org.opencv.core.transform.crop"

@param src input matrix.
@param rect a rect to crop a matrix to
@sa resize
*/
GAPI_EXPORTS GMat crop(const GMat& src, const Rect& rect);

/** @brief Applies horizontal concatenation to given matrices.

The function horizontally concatenates two GMat matrices (with the same number of rows).
@code{.cpp}
    // Given two inputs A and B with the contents (shown here as plain matrices):
    //   A = [1, 4;       B = [7, 10;
    //        2, 5;            8, 11;
    //        3, 6]            9, 12]
    GMat A, B;
    GMat C = gapi::concatHor(A, B);
    // C:
    // [1, 4, 7, 10;
    //  2, 5, 8, 11;
    //  3, 6, 9, 12]
@endcode
Output matrix must have the same number of rows and depth as src1 and src2, and its number of columns must be the sum of columns of src1 and src2.
Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.imgproc.transform.concatHor"

@param src1 first input matrix to be considered for horizontal concatenation.
@param src2 second input matrix to be considered for horizontal concatenation.
@sa concatVert
*/
GAPI_EXPORTS GMat concatHor(const GMat& src1, const GMat& src2);

/** @overload
The function horizontally concatenates a given number of GMat matrices (with the same number of rows).
Output matrix must have the same number of rows and depth as the input matrices, and its number of columns must be the sum of the numbers of columns of the input matrices.

@param v vector of input matrices to be concatenated horizontally.
*/
GAPI_EXPORTS GMat concatHor(const std::vector<GMat> &v);

/** @brief Applies vertical concatenation to given matrices.

The function vertically concatenates two GMat matrices (with the same number of cols).
 @code{.cpp}
    // Given two inputs A and B with the contents (shown here as plain matrices):
    //   A = [1, 7;       B = [4, 10;
    //        2, 8;            5, 11;
    //        3, 9]            6, 12]
    GMat A, B;
    GMat C = gapi::concatVert(A, B);
    // C:
    // [1, 7;
    //  2, 8;
    //  3, 9;
    //  4, 10;
    //  5, 11;
    //  6, 12]
 @endcode

Output matrix must have the same number of cols and depth as src1 and src2, and its number of rows must be the sum of rows of src1 and src2.
Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.

@note Function textual ID is "org.opencv.imgproc.transform.concatVert"

@param src1 first input matrix to be considered for vertical concatenation.
@param src2 second input matrix to be considered for vertical concatenation.
@sa concatHor
*/
GAPI_EXPORTS GMat concatVert(const GMat& src1, const GMat& src2);

/** @overload
The function vertically concatenates a given number of GMat matrices (with the same number of columns).
Output matrix must have the same number of columns and depth as the input matrices, and its number of rows must be the sum of the numbers of rows of the input matrices.

@param v vector of input matrices to be concatenated vertically.
*/
GAPI_EXPORTS GMat concatVert(const std::vector<GMat> &v);


/** @brief Performs a look-up table transform of a matrix.

The function LUT fills the output matrix with values from the look-up table. Indices of the entries
are taken from the input matrix. That is, the function processes each element of src as follows:
\f[\texttt{dst} (I)  \leftarrow \texttt{lut(src(I))}\f]

Supported matrix data types are @ref CV_8UC1.
Output is a matrix of the same size and number of channels as src, and the same depth as lut.
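
For example, an 8-bit image can be inverted through a look-up table (an illustrative sketch;
the table is ordinary host data, not a graph object):
@code{.cpp}
    cv::Mat lut(1, 256, CV_8UC1);
    for (int i = 0; i < 256; i++) lut.at<uchar>(i) = static_cast<uchar>(255 - i);

    cv::GMat in;                                       // CV_8UC1
    cv::GMat inverted = cv::gapi::LUT(in, lut);
@endcode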

@note Function textual ID is "org.opencv.core.transform.LUT"

@param src input matrix of 8-bit elements.
@param lut look-up table of 256 elements; in case of multi-channel input array, the table should
either have a single channel (in this case the same table is used for all channels) or the same
number of channels as in the input matrix.
*/
GAPI_EXPORTS GMat LUT(const GMat& src, const Mat& lut);

/** @brief Converts a matrix to another data depth with optional scaling.

The function converts source pixel values to the target data depth. saturate_cast\<\> is applied at
the end to avoid possible overflows:

\f[\texttt{dst} (x,y) = saturate \_ cast<rdepth>( \alpha \texttt{src} (x,y) +  \beta )\f]
Output matrix must be of the same size as input one.
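
For example, 8-bit data can be converted to floating point and scaled to the [0; 1] range
(an illustrative sketch):
@code{.cpp}
    cv::GMat in;                                       // e.g. CV_8UC1
    cv::GMat f = cv::gapi::convertTo(in, CV_32F, 1.0 / 255);
@endcode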

@note Function textual ID is "org.opencv.core.transform.convertTo"
@param src input matrix to be converted from.
@param rdepth desired output matrix depth or, rather, the depth since the number of channels are the
same as the input has; if rdepth is negative, the output matrix will have the same depth as the input.
@param alpha optional scale factor.
@param beta optional delta added to the scaled values.
 */
GAPI_EXPORTS GMat convertTo(const GMat& src, int rdepth, double alpha=1, double beta=0);

/** @brief Normalizes the norm or value range of an array.

The function normalize scales and shifts the input array elements so that
\f[\| \texttt{dst} \| _{L_p}= \texttt{alpha}\f]
(where p=Inf, 1 or 2) when normType=NORM_INF, NORM_L1, or NORM_L2, respectively; or so that
\f[\min _I  \texttt{dst} (I)= \texttt{alpha} , \, \, \max _I  \texttt{dst} (I)= \texttt{beta}\f]
when normType=NORM_MINMAX (for dense arrays only).
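
For example, an image can be contrast-stretched to the full 8-bit range (an illustrative sketch):
@code{.cpp}
    cv::GMat in;
    cv::GMat stretched = cv::gapi::normalize(in, 0.0, 255.0, cv::NORM_MINMAX);
@endcode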

@note Function textual ID is "org.opencv.core.normalize"

@param src input array.
@param alpha norm value to normalize to or the lower range boundary in case of the range
normalization.
@param beta upper range boundary in case of the range normalization; it is not used for the norm
normalization.
@param norm_type normalization type (see cv::NormTypes).
@param ddepth when negative, the output array has the same type as src; otherwise, it has the same
number of channels as src and the depth =ddepth.
@sa norm, Mat::convertTo
*/
GAPI_EXPORTS GMat normalize(const GMat& src, double alpha, double beta,
                            int norm_type, int ddepth = -1);

/** @brief Applies a perspective transformation to an image.

The function warpPerspective transforms the source image using the specified matrix:

\f[\texttt{dst} (x,y) =  \texttt{src} \left ( \frac{M_{11} x + M_{12} y + M_{13}}{M_{31} x + M_{32} y + M_{33}} ,
     \frac{M_{21} x + M_{22} y + M_{23}}{M_{31} x + M_{32} y + M_{33}} \right )\f]

when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted with invert
and then put in the formula above instead of M. The function cannot operate in-place.

@param src input image.
@param M \f$3\times 3\f$ transformation matrix.
@param dsize size of the output image.
@param flags combination of interpolation methods (#INTER_LINEAR or #INTER_NEAREST) and the
optional flag #WARP_INVERSE_MAP, that sets M as the inverse transformation (
\f$\texttt{dst}\rightarrow\texttt{src}\f$ ).
@param borderMode pixel extrapolation method (#BORDER_CONSTANT or #BORDER_REPLICATE).
@param borderValue value used in case of a constant border; by default, it equals 0.

@sa  warpAffine, resize, remap, getRectSubPix, perspectiveTransform
 */
GAPI_EXPORTS GMat warpPerspective(const GMat& src, const Mat& M, const Size& dsize, int flags = cv::INTER_LINEAR,
                                  int borderMode = cv::BORDER_CONSTANT, const Scalar& borderValue = Scalar());

/** @brief Applies an affine transformation to an image.

The function warpAffine transforms the source image using the specified matrix:

\f[\texttt{dst} (x,y) =  \texttt{src} ( \texttt{M} _{11} x +  \texttt{M} _{12} y +  \texttt{M} _{13}, \texttt{M} _{21} x +  \texttt{M} _{22} y +  \texttt{M} _{23})\f]

when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted
with #invertAffineTransform and then put in the formula above instead of M. The function cannot
operate in-place.
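
For example, an image can be rotated around its center (an illustrative sketch; a 640x480 input
is assumed):
@code{.cpp}
    cv::Mat M = cv::getRotationMatrix2D(cv::Point2f(320.f, 240.f), 30.0, 1.0);
    cv::GMat in;
    cv::GMat rotated = cv::gapi::warpAffine(in, M, cv::Size(640, 480));
@endcode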

@param src input image.
@param M \f$2\times 3\f$ transformation matrix.
@param dsize size of the output image.
@param flags combination of interpolation methods (see #InterpolationFlags) and the optional
flag #WARP_INVERSE_MAP that means that M is the inverse transformation (
\f$\texttt{dst}\rightarrow\texttt{src}\f$ ).
@param borderMode pixel extrapolation method (see #BorderTypes);
borderMode=#BORDER_TRANSPARENT isn't supported
@param borderValue value used in case of a constant border; by default, it is 0.

@sa  warpPerspective, resize, remap, getRectSubPix, transform
 */
GAPI_EXPORTS GMat warpAffine(const GMat& src, const Mat& M, const Size& dsize, int flags = cv::INTER_LINEAR,
                             int borderMode = cv::BORDER_CONSTANT, const Scalar& borderValue = Scalar());
//! @} gapi_transform

/** @brief Finds centers of clusters and groups input samples around the clusters.

The function kmeans implements a k-means algorithm that finds the centers of K clusters
and groups the input samples around the clusters. As an output, \f$\texttt{bestLabels}_i\f$
contains a 0-based cluster index for the \f$i^{th}\f$ sample.
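
A minimal sketch of clustering CV_32F samples (one sample per row) into 3 groups, using the
overload without initial labels (illustrative only):
@code{.cpp}
    cv::GMat samples;
    cv::GOpaque<double> compactness;
    cv::GMat labels, centers;
    std::tie(compactness, labels, centers) =
        cv::gapi::kmeans(samples, 3,
                         cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 10, 1.0),
                         5, cv::KMEANS_PP_CENTERS);
@endcode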

@note
 - Function textual ID is "org.opencv.core.kmeansND"
 - In case of an N-dimensional points' set given, input GMat can have the following traits:
2 dimensions, a single row or column if there are N channels,
or N columns if there is a single channel. Mat should have @ref CV_32F depth.
 - However, if a GMat with height != 1, width != 1, channels != 1 is given as data, it is treated as
A n-dimensional samples, where A = height and n = width * channels.
 - In case of GMat given as data:
     - the output labels are returned as 1-channel GMat with sizes
width = 1, height = A, where A is the number of samples, or width = bestLabels.width,
height = bestLabels.height if bestLabels is given;
     - the cluster centers are returned as 1-channel GMat with sizes
width = n, height = K, where n is the samples' dimensionality and K is the number of clusters.
 - As one of possible usages, if you want to control the initial labels for each attempt
by yourself, you can utilize just the core of the function. To do that, set the number
of attempts to 1, initialize labels each time using a custom algorithm, pass them with the
( flags = #KMEANS_USE_INITIAL_LABELS ) flag, and then choose the best (most-compact) clustering.

@param data Data for clustering. An array of N-dimensional points with float coordinates is needed.
The function can take GArray<Point2f>, GArray<Point3f> for 2D and 3D cases or GMat for any
dimensionality and number of channels.
@param K Number of clusters to split the set by.
@param bestLabels Optional input integer array that can store the supposed initial cluster indices
for every sample. Used when ( flags = #KMEANS_USE_INITIAL_LABELS ) flag is set.
@param criteria The algorithm termination criteria, that is, the maximum number of iterations
and/or the desired accuracy. The accuracy is specified as criteria.epsilon. As soon as each of
the cluster centers moves by less than criteria.epsilon on some iteration, the algorithm stops.
@param attempts Flag to specify the number of times the algorithm is executed using different
initial labellings. The algorithm returns the labels that yield the best compactness (see the first
function return value).
@param flags Flag that can take values of cv::KmeansFlags .

@return
 - Compactness measure that is computed as
\f[\sum _i  \| \texttt{samples} _i -  \texttt{centers} _{ \texttt{labels} _i} \| ^2\f]
after every attempt. The best (minimum) value is chosen and the corresponding labels and the
compactness value are returned by the function.
 - Integer array that stores the cluster indices for every sample.
 - Array of the cluster centers.
*/
GAPI_EXPORTS std::tuple<GOpaque<double>,GMat,GMat>
kmeans(const GMat& data, const int K, const GMat& bestLabels,
       const TermCriteria& criteria, const int attempts, const KmeansFlags flags);

/** @overload
@note
 - Function textual ID is "org.opencv.core.kmeansNDNoInit"
 - #KMEANS_USE_INITIAL_LABELS flag must not be set while using this overload.
 */
GAPI_EXPORTS_W std::tuple<GOpaque<double>,GMat,GMat>
kmeans(const GMat& data, const int K, const TermCriteria& criteria, const int attempts,
       const KmeansFlags flags);

/** @overload
@note Function textual ID is "org.opencv.core.kmeans2D"
 */
GAPI_EXPORTS_W std::tuple<GOpaque<double>,GArray<int>,GArray<Point2f>>
kmeans(const GArray<Point2f>& data, const int K, const GArray<int>& bestLabels,
       const TermCriteria& criteria, const int attempts, const KmeansFlags flags);

/** @overload
@note Function textual ID is "org.opencv.core.kmeans3D"
 */
GAPI_EXPORTS std::tuple<GOpaque<double>,GArray<int>,GArray<Point3f>>
kmeans(const GArray<Point3f>& data, const int K, const GArray<int>& bestLabels,
       const TermCriteria& criteria, const int attempts, const KmeansFlags flags);

namespace streaming {
/** @brief Gets dimensions from Mat.
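
A minimal sketch (illustrative only):
@code{.cpp}
    cv::GMat in;
    cv::GOpaque<cv::Size> sz = cv::gapi::streaming::size(in);
@endcode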

@note Function textual ID is "org.opencv.streaming.size"

@param src Input tensor
@return Size (tensor dimensions).
*/
GAPI_EXPORTS_W GOpaque<Size> size(const GMat& src);

/** @overload
Gets dimensions from rectangle.

@note Function textual ID is "org.opencv.streaming.sizeR"

@param r Input rectangle.
@return Size (rectangle dimensions).
*/
GAPI_EXPORTS_W GOpaque<Size> size(const GOpaque<Rect>& r);

/** @brief Gets dimensions from MediaFrame.

@note Function textual ID is "org.opencv.streaming.sizeMF"

@param src Input frame
@return Size (frame dimensions).
*/
GAPI_EXPORTS GOpaque<Size> size(const GFrame& src);
} //namespace streaming
} //namespace gapi
} //namespace cv

#endif //OPENCV_GAPI_CORE_HPP