zoukankan      html  css  js  c++  java
  • 伪分布模式 hive查询

    [root@node1 ~]# lscpu
    Architecture: x86_64
    CPU op-mode(s): 32-bit, 64-bit
    Byte Order: Little Endian
    CPU(s): 1
    On-line CPU(s) list: 0
    Thread(s) per core: 1
    Core(s) per socket: 1
    Socket(s): 1
    NUMA node(s): 1
    Vendor ID: GenuineIntel
    CPU family: 6
    Model: 58
    Stepping: 8
    CPU MHz: 2299.062
    BogoMIPS: 4598.12
    L1d cache: 32K
    L1i cache: 32K
    L2 cache: 6144K
    NUMA node0 CPU(s): 0

    [root@node1 ~]# free -m
    total used free shared buffers cached
    Mem: 996 647 348 0 9 109
    -/+ buffers/cache: 527 468
    Swap: 1839 0 1839

    [root@node1 ~]# cat /proc/meminfo
    MemTotal: 1020348 kB
    MemFree: 357196 kB
    Buffers: 10156 kB
    Cached: 112464 kB
    SwapCached: 0 kB
    Active: 505360 kB
    Inactive: 70812 kB
    Active(anon): 453560 kB
    Inactive(anon): 196 kB
    Active(file): 51800 kB
    Inactive(file): 70616 kB
    Unevictable: 0 kB
    Mlocked: 0 kB
    SwapTotal: 1884152 kB
    SwapFree: 1884152 kB
    Dirty: 96 kB
    Writeback: 0 kB
    AnonPages: 453564 kB
    Mapped: 25552 kB
    Shmem: 208 kB
    Slab: 63916 kB
    SReclaimable: 12588 kB
    SUnreclaim: 51328 kB
    KernelStack: 2280 kB
    PageTables: 5644 kB
    NFS_Unstable: 0 kB
    Bounce: 0 kB
    WritebackTmp: 0 kB
    CommitLimit: 2394324 kB
    Committed_AS: 722540 kB
    VmallocTotal: 34359738367 kB
    VmallocUsed: 7852 kB
    VmallocChunk: 34359717412 kB
    HardwareCorrupted: 0 kB
    AnonHugePages: 358400 kB
    HugePages_Total: 0
    HugePages_Free: 0
    HugePages_Rsvd: 0
    HugePages_Surp: 0
    Hugepagesize: 2048 kB
    DirectMap4k: 8128 kB
    DirectMap2M: 1040384 kB

    [root@node1 ~]# lsblk
    NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
    sr0 11:0 1 1024M 0 rom
    sda 8:0 0 18G 0 disk
    ├─sda1 8:1 0 500M 0 part /boot
    └─sda2 8:2 0 17.5G 0 part
      ├─vg_node1-lv_root (dm-0) 253:0 0 15.7G 0 lvm /
      └─vg_node1-lv_swap (dm-1) 253:1 0 1.8G 0 lvm [SWAP]

    [root@node1 ~]# fdisk -l

    Disk /dev/sda: 19.3 GB, 19327352832 bytes
    255 heads, 63 sectors/track, 2349 cylinders
    Units = cylinders of 16065 * 512 = 8225280 bytes
    Sector size (logical/physical): 512 bytes / 512 bytes
    I/O size (minimum/optimal): 512 bytes / 512 bytes
    Disk identifier: 0x000ecb12

    Device Boot Start End Blocks Id System
    /dev/sda1 * 1 64 512000 83 Linux
    Partition 1 does not end on cylinder boundary.
    /dev/sda2 64 2350 18361344 8e Linux LVM

    Disk /dev/mapper/vg_node1-lv_root: 16.9 GB, 16869490688 bytes
    255 heads, 63 sectors/track, 2050 cylinders
    Units = cylinders of 16065 * 512 = 8225280 bytes
    Sector size (logical/physical): 512 bytes / 512 bytes
    I/O size (minimum/optimal): 512 bytes / 512 bytes
    Disk identifier: 0x00000000

    Disk /dev/mapper/vg_node1-lv_swap: 1929 MB, 1929379840 bytes
    255 heads, 63 sectors/track, 234 cylinders
    Units = cylinders of 16065 * 512 = 8225280 bytes
    Sector size (logical/physical): 512 bytes / 512 bytes
    I/O size (minimum/optimal): 512 bytes / 512 bytes
    Disk identifier: 0x00000000

    [hadoop@node1 root]$ hadoop dfsadmin -report
    Warning: $HADOOP_HOME is deprecated.

    Configured Capacity: 16604643328 (15.46 GB)
    Present Capacity: 13766094848 (12.82 GB)
    DFS Remaining: 13747478528 (12.8 GB)
    DFS Used: 18616320 (17.75 MB)
    DFS Used%: 0.14%
    Under replicated blocks: 30
    Blocks with corrupt replicas: 0
    Missing blocks: 0


    Datanodes available: 1 (1 total, 0 dead)

    Name: 127.0.0.1:50010
    Decommission Status : Normal
    Configured Capacity: 16604643328 (15.46 GB)
    DFS Used: 18616320 (17.75 MB)
    Non DFS Used: 2838548480 (2.64 GB)
    DFS Remaining: 13747478528(12.8 GB)
    DFS Used%: 0.11%
    DFS Remaining%: 82.79%
    Last contact: Sun Jul 05 01:31:44 EDT 2015

    hive> select year,avg(air) from ncdc group by year;
    Total MapReduce jobs = 1
    Launching Job 1 out of 1
    Number of reduce tasks not specified. Estimated from input data size: 1
    In order to change the average load for a reducer (in bytes):
    set hive.exec.reducers.bytes.per.reducer=<number>
    In order to limit the maximum number of reducers:
    set hive.exec.reducers.max=<number>
    In order to set a constant number of reducers:
    set mapred.reduce.tasks=<number>
    Starting Job = job_201507050117_0001, Tracking URL = http://node1:50030/jobdetails.jsp?jobid=job_201507050117_0001
    Kill Command = /opt/software/hadoop-1.2.1/libexec/../bin/hadoop job -kill job_201507050117_0001
    Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 1
    2015-07-05 01:33:08,403 Stage-1 map = 0%, reduce = 0%
    2015-07-05 01:33:18,463 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.36 sec
    2015-07-05 01:33:19,470 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.36 sec
    2015-07-05 01:33:20,479 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.36 sec
    2015-07-05 01:33:21,486 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.36 sec
    2015-07-05 01:33:22,492 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.36 sec
    2015-07-05 01:33:23,500 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.36 sec
    2015-07-05 01:33:24,505 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.36 sec
    2015-07-05 01:33:25,510 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.36 sec
    2015-07-05 01:33:26,517 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.36 sec
    2015-07-05 01:33:27,530 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.36 sec
    2015-07-05 01:33:28,541 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.36 sec
    2015-07-05 01:33:29,549 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.36 sec
    2015-07-05 01:33:30,556 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.36 sec
    2015-07-05 01:33:31,562 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.36 sec
    2015-07-05 01:33:32,568 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.36 sec
    2015-07-05 01:33:33,574 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.36 sec
    2015-07-05 01:33:34,579 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 3.36 sec
    2015-07-05 01:33:35,591 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 3.36 sec
    2015-07-05 01:33:36,605 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 5.38 sec
    2015-07-05 01:33:37,611 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 5.38 sec
    2015-07-05 01:33:38,620 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 5.38 sec
    2015-07-05 01:33:39,626 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 5.38 sec
    2015-07-05 01:33:40,640 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 5.38 sec
    2015-07-05 01:33:41,646 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 5.38 sec
    MapReduce Total cumulative CPU time: 5 seconds 380 msec
    Ended Job = job_201507050117_0001
    MapReduce Jobs Launched:
    Job 0: Map: 1 Reduce: 1 Cumulative CPU: 5.38 sec HDFS Read: 17013533 HDFS Write: 537 SUCCESS
    Total MapReduce CPU Time Spent: 5 seconds 380 msec
    OK
    1901 45.16831683168317
    1902 21.659558263518658
    1903 -17.67699115044248
    1904 33.32224247948952
    1905 43.3322664228014
    1906 47.0834855681403
    1907 28.09189090243456
    1908 28.80607441154138
    1909 25.24907112526539
    1910 29.00013071895425
    1911 28.088644112247575
    1912 16.801145236855803
    1913 8.191569568197396
    1914 26.378301131816624
    1915 2.811635615498914
    1916 21.42393787117405
    1917 22.895140080045742
    1918 27.712506047411708
    1919 23.67520250849229
    1920 43.508667830133795
    1921 31.834957020057306
    1922 -44.03716409376787
    1923 26.79247747159462
    Time taken: 68.15 seconds, Fetched: 23 row(s)

    hive> select year,max(air) from ncdc group by year;
    Total MapReduce jobs = 1
    Launching Job 1 out of 1
    Number of reduce tasks not specified. Estimated from input data size: 1
    In order to change the average load for a reducer (in bytes):
    set hive.exec.reducers.bytes.per.reducer=<number>
    In order to limit the maximum number of reducers:
    set hive.exec.reducers.max=<number>
    In order to set a constant number of reducers:
    set mapred.reduce.tasks=<number>
    Starting Job = job_201507050117_0004, Tracking URL = http://node1:50030/jobdetails.jsp?jobid=job_201507050117_0004
    Kill Command = /opt/software/hadoop-1.2.1/libexec/../bin/hadoop job -kill job_201507050117_0004
    Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 1
    2015-07-05 01:40:13,809 Stage-1 map = 0%, reduce = 0%
    2015-07-05 01:40:24,856 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.48 sec
    2015-07-05 01:40:25,863 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.48 sec
    2015-07-05 01:40:26,868 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.48 sec
    2015-07-05 01:40:27,873 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.48 sec
    2015-07-05 01:40:28,881 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.48 sec
    2015-07-05 01:40:29,885 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.48 sec
    2015-07-05 01:40:30,893 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.48 sec
    2015-07-05 01:40:31,897 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.48 sec
    2015-07-05 01:40:32,906 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.48 sec
    2015-07-05 01:40:33,912 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.48 sec
    2015-07-05 01:40:34,917 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.48 sec
    2015-07-05 01:40:35,924 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.48 sec
    2015-07-05 01:40:36,928 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.48 sec
    2015-07-05 01:40:37,933 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.48 sec
    2015-07-05 01:40:38,938 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 5.69 sec
    2015-07-05 01:40:39,943 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 5.69 sec
    2015-07-05 01:40:40,950 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 5.69 sec
    2015-07-05 01:40:41,956 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 5.69 sec
    2015-07-05 01:40:42,968 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 5.69 sec
    MapReduce Total cumulative CPU time: 5 seconds 690 msec
    Ended Job = job_201507050117_0004
    MapReduce Jobs Launched:
    Job 0: Map: 1 Reduce: 1 Cumulative CPU: 5.69 sec HDFS Read: 17013533 HDFS Write: 184 SUCCESS
    Total MapReduce CPU Time Spent: 5 seconds 690 msec
    OK
    1901 94
    1902 94
    1903 94
    1904 94
    1905 94
    1906 94
    1907 94
    1908 94
    1909 94
    1910 94
    1911 94
    1912 94
    1913 94
    1914 94
    1915 94
    1916 94
    1917 94
    1918 94
    1919 94
    1920 94
    1921 94
    1922 94
    1923 94
    Time taken: 66.373 seconds, Fetched: 23 row(s)

    hive> select count(*) from ncdc;
    Total MapReduce jobs = 1
    Launching Job 1 out of 1
    Number of reduce tasks determined at compile time: 1
    In order to change the average load for a reducer (in bytes):
    set hive.exec.reducers.bytes.per.reducer=<number>
    In order to limit the maximum number of reducers:
    set hive.exec.reducers.max=<number>
    In order to set a constant number of reducers:
    set mapred.reduce.tasks=<number>
    Starting Job = job_201507050117_0006, Tracking URL = http://node1:50030/jobdetails.jsp?jobid=job_201507050117_0006
    Kill Command = /opt/software/hadoop-1.2.1/libexec/../bin/hadoop job -kill job_201507050117_0006
    Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 1
    2015-07-05 02:09:03,771 Stage-1 map = 0%, reduce = 0%
    2015-07-05 02:09:12,807 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 2.43 sec
    2015-07-05 02:09:13,812 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 2.43 sec
    2015-07-05 02:09:14,817 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 2.43 sec
    2015-07-05 02:09:15,821 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 2.43 sec
    2015-07-05 02:09:16,826 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 2.43 sec
    2015-07-05 02:09:17,831 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 2.43 sec
    2015-07-05 02:09:18,837 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 2.43 sec
    2015-07-05 02:09:19,843 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 2.43 sec
    2015-07-05 02:09:20,850 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 2.43 sec
    2015-07-05 02:09:21,856 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 2.43 sec
    2015-07-05 02:09:22,863 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 2.43 sec
    2015-07-05 02:09:23,871 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 2.43 sec
    2015-07-05 02:09:24,876 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 2.43 sec
    2015-07-05 02:09:25,880 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 4.55 sec
    2015-07-05 02:09:26,886 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 4.55 sec
    2015-07-05 02:09:27,891 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 4.55 sec
    2015-07-05 02:09:28,900 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 4.55 sec
    2015-07-05 02:09:29,907 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 4.55 sec
    2015-07-05 02:09:30,912 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 4.55 sec
    MapReduce Total cumulative CPU time: 4 seconds 550 msec
    Ended Job = job_201507050117_0006
    MapReduce Jobs Launched:
    Job 0: Map: 1 Reduce: 1 Cumulative CPU: 4.55 sec HDFS Read: 17013533 HDFS Write: 7 SUCCESS
    Total MapReduce CPU Time Spent: 4 seconds 550 msec
    OK
    335346
    Time taken: 64.48 seconds, Fetched: 1 row(s)

    查询1920年的平均气温,比1923年高多少或者低多少。
    select t1.a - t2.b
    from (select avg(air) as a from ncdc where year=1920) t1
    join (select avg(air) as b from ncdc where year=1923) t2;

    版权声明:本文为博主原创文章,未经博主允许不得转载。

  • 相关阅读:
    任务二 发布作业信息(已完成)
    查看作业信息(任务一 已完成)
    项目冲刺任务之任务场景分析(四)
    项目冲刺之任务场景分析(三)
    解析XML文件的两种方式 SAX和DOM
    解析XML文件的两种方式 SAX和DOM
    iOS开发代码规范(通用)
    随机创建点击对象
    自定义加载等待框(MBProgressHUD)
    KVO、KVC
  • 原文地址:https://www.cnblogs.com/mrcharles/p/4731714.html
Copyright © 2011-2022 走看看