Oracle 10g CRS 제거

 

[root@rac1 bin]# cd ..

[root@rac1 db]# ls

bin cv inventory lib oracore racg srvm

cdata diagnostics jdk log oraInst.loc rdbms xdk

cfgtoollogs evm jlib network oui relnotes

clone has jre nls perl root.sh

crs install JRE OPatch plsql slax

css install.platform ldap opmn precomp sqlplus

[root@rac1 db]#

[root@rac1 db]# cd install

[root@rac1 install]# ls

cluster.ini install.incl readme.txt rootdeletenode.sh rootupgrade

cmdllroot.sh make.log rootaddnode.sbs rootdelete.sh templocal

envVars.properties paramfile.crs rootconfig rootinstall

install.excl preupdate.sh rootdeinstall.sh rootlocaladd

[root@rac1 install]#

[root@rac1 install]# ./rootdelete.sh

Shutting down Oracle Cluster Ready Services (CRS):

Stopping resources.

Error while stopping resources. Possible cause: CRSD is down.

Stopping CSSD.

Unable to communicate with the CSS daemon.

Shutdown has begun. The daemons should exit soon.

Checking to see if Oracle CRS stack is down...

Oracle CRS stack is not running.

Oracle CRS stack is down now.

Removing script for Oracle Cluster Ready services

Updating ocr file for downgrade

Cleaning up SCR settings in '/etc/oracle/scls_scr'

[root@rac1 install]#

[root@rac1 etc]# ls initt*

inittab inittab.no_crs inittab.orig

[root@rac1 etc]#

[root@rac1 etc]# rm -f /etc/init.d/init.cssd

[root@rac1 etc]# rm -f /etc/init.d/init.crs

[root@rac1 etc]# rm -f /etc/init.d/init.crsd

[root@rac1 etc]# rm -f /etc/init.d/init.evmd

[root@rac1 etc]# rm -f /etc/rc2.d/K96init.crs

[root@rac1 etc]# rm -f /etc/rc2.d/S96init.crs

[root@rac1 etc]# rm -f /etc/rc3.d/K96init.crs

[root@rac1 etc]# rm -f /etc/rc3.d/S96init.crs

[root@rac1 etc]# rm -f /etc/rc5.d/K96init.crs

[root@rac1 etc]# rm -f /etc/rc5.d/S96init.crs

[root@rac1 etc]# rm -Rf /etc/oracle/scls_scr

[root@rac1 etc]# rm -f /etc/inittab.crs

[root@rac1 etc]# cp /etc/inittab.orig /etc/inittab

cp: overwrite `/etc/inittab'?

[root@rac1 etc]#

[root@rac1 etc]#

[root@rac1 etc]#

[root@rac1 etc]# ls initt*

inittab inittab.no_crs inittab.orig

[root@rac1 etc]#

   

블로그 이미지

운명을바꾸는자

IT와 함께 살아가는 삶

,

 

  1. LISTENER TEST

       

    1. srvctl stop listener -n rac1 -l LISTENER
      1. rac1@+ASM1 : /app/grid/11g/log/rac1> tail -f alertrac1.log

2012-07-31 10:08:54.011

[/app/grid/11g/bin/oraagent.bin(4026)]CRS-5016:Process "/app/grid/11g/bin/lsnrctl" spawned by agent "/app/grid/11g/bin/oraagent.bin" for action "check" failed: details at "(:CLSN00010:)" in "/app/grid/11g/log/rac1/agent/crsd/oraagent_oragrid/oraagent_oragrid.log"

   

  1. crs_stat -v -t

rac1@+ASM1 : /app/grid> crs_stat -v -t

Name Type R/RA F/FT Target State Host

----------------------------------------------------------------------

ora.CRSDATA.dg ora....up.type 0/5 0/ ONLINE ONLINE rac1

ora....ER.lsnr ora....er.type 0/5 0/ ONLINE ONLINE rac2

ora....N1.lsnr ora....er.type 0/5 0/0 ONLINE ONLINE rac2

ora.ORADB.dg ora....up.type 0/5 0/ ONLINE ONLINE rac1

ora.asm ora.asm.type 0/5 0/ ONLINE ONLINE rac1

ora.cvu ora.cvu.type 0/5 0/0 ONLINE ONLINE rac2

ora.gsd ora.gsd.type 0/5 0/ OFFLINE OFFLINE

ora....network ora....rk.type 0/5 0/ ONLINE ONLINE rac1

ora.oc4j ora.oc4j.type 0/1 0/2 OFFLINE OFFLINE

ora.ons ora.ons.type 0/3 0/ ONLINE ONLINE rac1

ora....SM1.asm application 0/5 0/0 ONLINE ONLINE rac1

ora....C1.lsnr application 0/5 0/0 OFFLINE OFFLINE

ora.rac1.gsd application 0/5 0/0 OFFLINE OFFLINE

ora.rac1.ons application 0/3 0/0 ONLINE ONLINE rac1

ora.rac1.vip ora....t1.type 0/0 0/0 ONLINE ONLINE rac1

ora....SM2.asm application 0/5 0/0 ONLINE ONLINE rac2

ora....C2.lsnr application 0/5 0/0 ONLINE ONLINE rac2

ora.rac2.gsd application 0/5 0/0 OFFLINE OFFLINE

ora.rac2.ons application 0/3 0/0 ONLINE ONLINE rac2

ora.rac2.vip ora....t1.type 0/0 0/0 ONLINE ONLINE rac2

ora.racdb.db ora....se.type 0/2 0/1 ONLINE ONLINE rac1

ora.scan1.vip ora....ip.type 0/0 0/0 ONLINE ONLINE rac2

rac1@+ASM1 : /app/grid>

   

   

  1. lsnrctl status

rac1@RACDB1 : /app/oracle> lsnrctl status

LSNRCTL for Linux: Version 11.2.0.3.0 - Production on 31-JUL-2012 10:11:08

   

Copyright (c) 1991, 2011, Oracle. All rights reserved.

   

Connecting to (ADDRESS=(PROTOCOL=tcp)(HOST=)(PORT=1521))

TNS-12541: TNS:no listener

TNS-12560: TNS:protocol adapter error

TNS-00511: No listener

Linux Error: 111: Connection refused

rac1@RACDB1 : /app/oracle>

   

   

  1. srvctl start listener -n rac1 -l LISTENER 올라오는데 시간이 걸림

       

   

  1. 한쪽 DB 내리기
    1. <oracle/ oragrid>srvctl status instance -d RACDB -i RACDB2
    2. <oracle/ oragrid>srvctl stop instance -d RACDB -i RACDB2

rac2@RACDB2 : /app/oracle> srvctl stop instance -d RACDB -i RACDB2

rac2@RACDB2 : /app/oracle>

rac2@RACDB2 : /app/oracle>

rac2@RACDB2 : /app/oracle> ss

   

SQL*Plus: Release 11.2.0.3.0 Production on Tue Jul 31 10:22:09 2012

   

Copyright (c) 1982, 2011, Oracle. All rights reserved.

   

Connected to an idle instance.

  1. <root> crsctl stop crs / crsctl stop crs -f(강제)

[root@rac2 bin]# ./crsctl stop crs

CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'rac2'

CRS-2673: Attempting to stop 'ora.crsd' on 'rac2'

CRS-2790: Starting shutdown of Cluster Ready Services-managed resources on 'rac2'

CRS-2673: Attempting to stop 'ora.LISTENER_SCAN1.lsnr' on 'rac2'

CRS-2673: Attempting to stop 'ora.cvu' on 'rac2'

CRS-2673: Attempting to stop 'ora.LISTENER.lsnr' on 'rac2'

CRS-2673: Attempting to stop 'ora.CRSDATA.dg' on 'rac2'

CRS-2673: Attempting to stop 'ora.ORADB.dg' on 'rac2'

CRS-2677: Stop of 'ora.cvu' on 'rac2' succeeded

CRS-2672: Attempting to start 'ora.cvu' on 'rac1'

CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'rac2' has completed

CRS-4133: Oracle High Availability Services has been stopped.

[root@rac2 bin]#

  

  1. 확인

rac1@+ASM1 : /app/grid> crs_stat -v -t

Name Type R/RA F/FT Target State Host

----------------------------------------------------------------------

ora.CRSDATA.dg ora....up.type 0/5 0/ ONLINE ONLINE rac1

ora....ER.lsnr ora....er.type 0/5 0/ ONLINE ONLINE rac1

ora....N1.lsnr ora....er.type 0/5 0/0 ONLINE ONLINE rac1

ora.ORADB.dg ora....up.type 0/5 0/ ONLINE ONLINE rac1

ora.asm ora.asm.type 0/5 0/ ONLINE ONLINE rac1

ora.cvu ora.cvu.type 0/5 0/0 ONLINE ONLINE rac1

ora.gsd ora.gsd.type 0/5 0/ OFFLINE OFFLINE

ora....network ora....rk.type 0/5 0/ ONLINE ONLINE rac1

ora.oc4j ora.oc4j.type 0/1 0/2 OFFLINE OFFLINE

ora.ons ora.ons.type 0/3 0/ ONLINE ONLINE rac1

ora....SM1.asm application 0/5 0/0 ONLINE ONLINE rac1

ora....C1.lsnr application 0/5 0/0 ONLINE ONLINE rac1

ora.rac1.gsd application 0/5 0/0 OFFLINE OFFLINE

ora.rac1.ons application 0/3 0/0 ONLINE ONLINE rac1

ora.rac1.vip ora....t1.type 0/0 0/0 ONLINE ONLINE rac1

ora.rac2.vip ora....t1.type 0/0 0/0 ONLINE ONLINE rac1

ora.racdb.db ora....se.type 0/2 0/1 ONLINE ONLINE rac1

ora.scan1.vip ora....ip.type 0/0 0/0 ONLINE ONLINE rac1

rac1@+ASM1 : /app/grid>

   

  1. 한쪽 DB 올리기
    1. ./crsctl start crs --올라오는데 시간이 걸림

[root@rac2 bin]# ./crsctl start crs

CRS-4123: Oracle High Availability Services has been started.

  1. 확인
    1. rac2@+ASM2 : /app/grid/11g/log/rac2> tail -f alertrac2.log
    2. rac2@+ASM2 : /app/grid/app/oragrid/diag/asm/+asm/+ASM2/trace> tail -f alert_+ASM2.log

rac2@+ASM2 : /app/grid> crs_stat -t -v

Name Type R/RA F/FT Target State Host

----------------------------------------------------------------------

ora.CRSDATA.dg ora....up.type 0/5 0/ ONLINE ONLINE rac1

ora....ER.lsnr ora....er.type 0/5 0/ ONLINE ONLINE rac1

ora....N1.lsnr ora....er.type 0/5 0/0 ONLINE ONLINE rac1

ora.ORADB.dg ora....up.type 0/5 0/ ONLINE ONLINE rac1

ora.asm ora.asm.type 0/5 0/ ONLINE ONLINE rac1

ora.cvu ora.cvu.type 0/5 0/0 ONLINE ONLINE rac1

ora.gsd ora.gsd.type 0/5 0/ OFFLINE OFFLINE

ora....network ora....rk.type 0/5 0/ ONLINE ONLINE rac1

ora.oc4j ora.oc4j.type 0/1 0/2 OFFLINE OFFLINE

ora.ons ora.ons.type 0/3 0/ ONLINE ONLINE rac1

ora....SM1.asm application 0/5 0/0 ONLINE ONLINE rac1

ora....C1.lsnr application 0/5 0/0 ONLINE ONLINE rac1

ora.rac1.gsd application 0/5 0/0 OFFLINE OFFLINE

ora.rac1.ons application 0/3 0/0 ONLINE ONLINE rac1

ora.rac1.vip ora....t1.type 0/0 0/0 ONLINE ONLINE rac1

ora....SM2.asm application 0/5 0/0 ONLINE ONLINE rac2

ora....C2.lsnr application 0/5 0/0 ONLINE ONLINE rac2

ora.rac2.gsd application 0/5 0/0 OFFLINE OFFLINE

ora.rac2.ons application 0/3 0/0 ONLINE ONLINE rac2

ora.rac2.vip ora....t1.type 0/0 0/0 ONLINE ONLINE rac2

ora.racdb.db ora....se.type 0/2 0/1 ONLINE ONLINE rac1

ora.scan1.vip ora....ip.type 0/0 0/0 ONLINE ONLINE rac1

rac2@+ASM2 : /app/grid>

  1. 한쪽 DB 올리기
    1. rac2@RACDB2 : /app/oracle> srvctl start instance -d RACDB -i RACDB2
    2. rac2@RACDB2 : /app/oracle/diag/rdbms/racdb/RACDB2/trace> tail -f alert_RACDB2.log

       

  1. Archive log list 설정

       

    1. 1 번과 2번노드에 동일한 위치에 디렉토리 생성

       

       

    1. 한쪽노드에서 명령어 입력

SQL> ALTER SYSTEM SET

2 LOG_ARCHIVE_START=TRUE SCOPE=SPFILE;

   

System altered.

   

SQL> ALTER SYSTEM SET

2 LOG_ARCHIVE_DEST='/data/archive'

3 SCOPE=SPFILE;

   

System altered.

   

SQL> ALTER SYSTEM SET

2 LOG_ARCHIVE_FORMAT='%S.ARC' SCOPE=SPFILE; -- 잘못된 입력-> 추후에 변경

   

System altered.

   

  1. 1번 2번 노드 DB shutdown

srvctl stop database -d RACDB

   

  1. 초기 아카이브 설정 형식을 잘못 입력하여 에러발행

SQL> startup mount;

ORA-32004: obsolete or deprecated parameter(s) specified for RDBMS instance

ORA-19905: log_archive_format must contain %s, %t and %r

SQL>

SQL> create pfile from spfile='+ORADB/RACDB/spfileRACDB.ora'

2 ;

   

File created.

   

Pfile 생성후 해당 포맷변경

   

*.log_archive_dest='/data/archive'

*.log_archive_format=orcl_%s_%t_%r.arc

#*.log_archive_format='%S.ARC'

*.log_archive_start=TRUE

*.memory_target=1616904192

   

<원복> 1번 노드와 2번노드가 같이 바라보고 있음

Create spfile='+ORADB/RACDB/spfileRACDB.ora' from pfile

   

  1. 아카이브로그 설정 진행

SQL> startup mount;

ORA-32004: obsolete or deprecated parameter(s) specified for RDBMS instance

ORACLE instance started.

   

Total System Global Area 1620115456 bytes

Fixed Size 2228824 bytes

Variable Size 1207963048 bytes

Database Buffers 402653184 bytes

Redo Buffers 7270400 bytes

Database mounted.

SQL> ALTER DATABASE ARCHIVELOG;

   

Database altered.

   

SQL> ALTER DATABASE OPEN;

   

Database altered.

   

SQL> archive log list

Database log mode Archive Mode

Automatic archival Enabled

Archive destination /data/archive

Oldest online log sequence 43

Next log sequence to archive 44

Current log sequence 44

   

SQL> shutdown immediate

Database closed.

Database dismounted.

  

   

  1. 1번, 2번노드 내린 뒤, 동시에 올려줌.

rac1@+ASM1 : /app/grid> srvctl start database -d RACDB

   

  1. VOTE/ OCR 백업 및 복구하기
    1. Vote

: 노드들의 출석부

: 1초에 한번씩 노드들이 살아있나 죽어있나를 체크해서 적어 놓는다.

: 다른 노드들이 살았나 죽었나 관리하는 디스크 = vote disk

   

※ halt bit : 모든 노드들에게 핑 날려봄

crs가 halt bit를 날려 각 노드들의 생사(?) 여부를 조사한다. 특정 노드에서 반응이 없으면 바로 죽었다고

판단하는 것이 아니라 vote disk로 가본다. 이곳에 해당 노드가 적혀 있다면, 다른업무때문에 바빠서 반응이 없었다고 판단한다.

문제는 vote 디스크를 뒤졌는데 해당노드가 없다. crs는 해당노드가 죽었다고 판단하고 재부팅시켜버린다.

   

# crsctl add css votedisk <new voting disk path> -force

# crsctl delete css votedisk <old voting disk path> -force

  

  1. OCR

 : RAC 상의 모든 노드들에 대한 정보와 모든 자원들에 대한 정보 저장 - 에러나면 RAC전체 중단

 : 윈도우의 레지스트리 역할 

 : OCR 정보를 잘 관리하여야 한다.

rac1@+ASM1 : /app/grid> ocrcheck

Status of Oracle Cluster Registry is as follows :

Version : 3

Total space (kbytes) : 262120

Used space (kbytes) : 2864

Available space (kbytes) : 259256

ID : 678271056

Device/File Name : +CRSDATA

Device/File integrity check succeeded

   

Device/File not configured

   

Device/File not configured

   

Device/File not configured

   

Device/File not configured

   

Cluster registry integrity check succeeded

   

Logical corruption check bypassed due to non-privileged user

   

rac1@+ASM1 : /app/grid>

   

rac1@+ASM1 : /app/grid> ocrconfig -showbackup

   

rac1 2012/07/31 06:12:16 /app/grid/11g/cdata/rac-cluster/backup00.ocr

   

rac1 2012/07/31 02:12:15 /app/grid/11g/cdata/rac-cluster/backup01.ocr

   

rac1 2012/07/30 22:12:13 /app/grid/11g/cdata/rac-cluster/backup02.ocr

   

rac1 2012/07/30 14:12:10 /app/grid/11g/cdata/rac-cluster/day.ocr

   

rac2 2012/07/24 13:07:55 /app/grid/11g/cdata/rac-cluster/week.ocr

PROT-25: Manual backups for the Oracle Cluster Registry are not available

rac1@+ASM1 : /app/grid>

   

Recovery OCR Disk using Export Backup

• CRS Stop 후 OCR Disk Import

# ocrconfig -import /shared/export/ocrback.dmp

   

cluvfy 명령어를 이용하여 복원이 잘 되었나 확인

 [root@rac1]# cluvfy comp ocr -n all

   

Recovery OCR Disk using Automatic Backup

• CRS Stop 후 OCR Backup Restore

# ocrconfig -restore $ORA_CRS_HOME/cdata/crs/day.ocr

  

   

[참고문헌]

http://dinggur.tistory.com/219

블로그 이미지

운명을바꾸는자

IT와 함께 살아가는 삶

,


  • ORACLE VPD란?

     

    • Virtual Private Database(이하 VPD) 란 하나의 DATABASE내의 수 많은 data 에서 보안상 혹은 특정 서비스를 목적으로 필요한 유저에 해당하는 data만을 다룰 수 있게 하는 기술을 말한다.
    • 일반적으로 보안레벨, 부서별 혹은 사이트별 등 특정 목적에 맞게 구분되지만, 사용자는 이러한 구분을 받고 있다는 것을 전혀 알지 못하며, 마치 자신만의 DATABASE 를 다루는 것처럼 보이게 되어 Virtual Private Database 라고 부른다.

   

  1. 스키마 단위 TEST Script 및 확인
  • User, table 생성

SYS> GRANT CREATE SESSION, CREATE ANY CONTEXT, CREATE PROCEDURE, CREATE TRIGGER, ADMINISTER DATABASE TRIGGER TO sysadmin_vpd IDENTIFIED BY test;

   

Grant succeeded.

   

SYS>

SYS>

SYS> GRANT EXECUTE ON DBMS_SESSION TO sysadmin_vpd;

   

Grant succeeded.

   

SYS>

SYS>

SYS> GRANT EXECUTE ON DBMS_RLS TO sysadmin_vpd;

   

Grant succeeded.

   

SYS>

SYS> GRANT CREATE SESSION TO tbrooke IDENTIFIED BY test;

   

Grant succeeded.

   

SYS>

SYS>

SYS> GRANT CREATE SESSION TO owoods IDENTIFIED BY test;

   

Grant succeeded.

   

SYS>

SYS>

SYS> ALTER USER SCOTT ACCOUNT UNLOCK IDENTIFIED BY tiger

2 ;

   

User altered.

   

SYS>

SYS>

SYS> conn scott/tiger

Connected.

SCOTT>

SCOTT> create table customers (

cust_no number(4),

cust_email varchar2(20),

4 cust_name varchar2(20));

   

Table created.

   

SCOTT>

SCOTT>

SCOTT> CREATE TABLE orders_tab (

2 cust_no NUMBER(4),

3 order_no NUMBER(4));

   

Table created.

   

SCOTT>

SCOTT> INSERT INTO customers VALUES (1234, 'TBROOKE', 'Thadeus Brooke');

   

1 row created.

   

SCOTT>

SCOTT>

SCOTT> INSERT INTO customers VALUES (5678, 'OWOODS', 'Oberon Woods');

   

1 row created.

   

SCOTT>

SCOTT>

SCOTT> GRANT SELECT ON customers TO sysadmin_vpd;

   

Grant succeeded.

   

SCOTT>

SCOTT> select * from orders_tab;

   

no rows selected

   

SCOTT>

SCOTT>

SCOTT> INSERT INTO orders_tab VALUES (1234, 9876);

   

1 row created.

   

SCOTT>

SCOTT>

SCOTT> INSERT INTO orders_tab VALUES (5678, 5432);

   

1 row created.

   

SCOTT>

SCOTT>

SCOTT> INSERT INTO orders_tab VALUES (5678, 4592);

   

1 row created.

   

SCOTT>

SCOTT> GRANT SELECT ON orders_tab TO tbrooke;

   

Grant succeeded.

   

SCOTT>

SCOTT>

SCOTT> GRANT SELECT ON orders_tab TO owoods;

   

Grant succeeded.

   

SCOTT>

   

  • application context 생성

SCOTT> conn sysadmin_vpd/test

Connected.

SYSADMIN_VPD>

SYSADMIN_VPD>

SYSADMIN_VPD>

SYSADMIN_VPD>

SYSADMIN_VPD>

SYSADMIN_VPD> CREATE OR REPLACE CONTEXT orders_ctx USING orders_ctx_pkg;

   

Context created.

   

SYSADMIN_VPD>

SYSADMIN_VPD>

CREATE OR REPLACE PACKAGE orders_ctx_pkg

IS

PROCEDURE set_custnum;

END;

/

   

Package created.

   

SYSADMIN_VPD>

SYSADMIN_VPD>

CREATE OR REPLACE PACKAGE BODY orders_ctx_pkg

IS

PROCEDURE set_custnum

AS

custnum NUMBER;

BEGIN

SELECT cust_no INTO custnum FROM scott.customers

WHERE cust_email = SYS_CONTEXT('USERENV', 'SESSION_USER');

DBMS_SESSION.SET_CONTEXT('orders_ctx', 'cust_no', custnum);

10 EXCEPTION

WHEN NO_DATA_FOUND THEN NULL;

END set_custnum;

13 END;

14 /

   

Package body created.

   

  • Logon trigger 생성

SYSADMIN_VPD>

CREATE TRIGGER set_custno_ctx_trig AFTER LOGON ON DATABASE

BEGIN

sysadmin_vpd.orders_ctx_pkg.set_custnum;

END;

5 /

   

Trigger created.

   

SYSADMIN_VPD>

SYS> conn tbrooke/test

Connected.

TBROOKE>

TBROOKE>

TBROOKE> SELECT SYS_CONTEXT('orders_ctx', 'cust_no') custnum FROM DUAL;

   

CUSTNUM

----------------------------------------------------------------------------------------------------

1234

   

TBROOKE>

  

   

   

  • DBMS_RLS 에 사용할 policy function 생성

SYSADMIN_VPD>

CREATE OR REPLACE FUNCTION get_user_orders(

schema_p IN VARCHAR2,

table_p IN VARCHAR2)

RETURN VARCHAR2

AS

orders_pred VARCHAR2 (400);

BEGIN

orders_pred := 'cust_no = SYS_CONTEXT(''orders_ctx'', ''cust_no'')';

RETURN orders_pred;

END;

/

   

Function created.

   

SYSADMIN_VPD>

   

  • DBMS_RLS.ADD_POLICY 설정

SYSADMIN_VPD> BEGIN

DBMS_RLS.ADD_POLICY (

object_schema => 'scott',

object_name => 'orders_tab',

function_schema => 'sysadmin_vpd',

policy_name => 'orders_policy',

policy_function => 'get_user_orders',

statement_types => 'select');

END;

/

   

PL/SQL procedure successfully completed.

   

SYSADMIN_VPD>

   

  • TEST

       

   

SYS> select * from scott.orders_tab;

   

CUST_NO ORDER_NO

---------- ----------

1234 9876

5678 5432

5678 4592

   

SYS>

   

TBROOKE>

TBROOKE> select * from scott.orders_tab;

   

CUST_NO ORDER_NO

---------- ----------

1234 9876

   

TBROOKE>

   

SYS> conn owoods/test

Connected.

OWOODS>

OWOODS>

OWOODS>

OWOODS> select * from scott.orders_tab;

   

CUST_NO ORDER_NO

---------- ----------

5678 5432

5678 4592

   

OWOODS>

   

  • 확인

OWOODS> select * from scott.orders_tab where ORDER_NO=5432 and cust_no=5678;

   

CUST_NO ORDER_NO

---------- ----------

5678 5432

   

   

Execution Plan

----------------------------------------------------------

Plan hash value: 3940001695

   

---------------------------------------------------------------------------------

| Id | Operation | Name | Rows | Bytes | Cost (%CPU)| Time |

---------------------------------------------------------------------------------

| 0 | SELECT STATEMENT | | 1 | 26 | 3 (0)| 00:00:01 |

|* 1 | FILTER | | | | | |

|* 2 | TABLE ACCESS FULL| ORDERS_TAB | 1 | 26 | 3 (0)| 00:00:01 |

---------------------------------------------------------------------------------

   

Predicate Information (identified by operation id):

---------------------------------------------------

   

1 - filter(5678=TO_NUMBER(SYS_CONTEXT('orders_ctx','cust_no')))

2 - filter("ORDER_NO"=5432 AND "CUST_NO"=5678)

   

==========================================

OWOODS> select * from scott.orders_tab;

   

CUST_NO ORDER_NO

---------- ----------

5678 5432

5678 4592

   

   

Execution Plan

----------------------------------------------------------

Plan hash value: 1302839296

   

--------------------------------------------------------------------------------

| Id | Operation | Name | Rows | Bytes | Cost (%CPU)| Time |

--------------------------------------------------------------------------------

| 0 | SELECT STATEMENT | | 2 | 52 | 3 (0)| 00:00:01 |

|* 1 | TABLE ACCESS FULL| ORDERS_TAB | 2 | 52 | 3 (0)| 00:00:01 |

--------------------------------------------------------------------------------

   

Predicate Information (identified by operation id):

---------------------------------------------------

   

1 - filter("CUST_NO"=TO_NUMBER(SYS_CONTEXT('orders_ctx','cust_no')))

   

Note

-----

- dynamic sampling used for this statement

   

   

Statistics

----------------------------------------------------------

2 recursive calls

0 db block gets

8 consistent gets

0 physical reads

0 redo size

513 bytes sent via SQL*Net to client

385 bytes received via SQL*Net from client

2 SQL*Net roundtrips to/from client

0 sorts (memory)

0 sorts (disk)

2 rows processed

   

========================================================

   

OWOODS> select * from scott.orders_tab where order_no=5432;

   

CUST_NO ORDER_NO

---------- ----------

5678 5432

   

   

Execution Plan

----------------------------------------------------------

Plan hash value: 1302839296

   

--------------------------------------------------------------------------------

| Id | Operation | Name | Rows | Bytes | Cost (%CPU)| Time |

--------------------------------------------------------------------------------

| 0 | SELECT STATEMENT | | 1 | 26 | 3 (0)| 00:00:01 |

|* 1 | TABLE ACCESS FULL| ORDERS_TAB | 1 | 26 | 3 (0)| 00:00:01 |

--------------------------------------------------------------------------------

   

Predicate Information (identified by operation id):

---------------------------------------------------

   

1 - filter("ORDER_NO"=5432 AND "CUST_NO"=TO_NUMBER(SYS_CONTEXT('order

s_ctx','cust_no')))

   

Note

-----

- dynamic sampling used for this statement

   

   

Statistics

----------------------------------------------------------

2 recursive calls

0 db block gets

8 consistent gets

0 physical reads

0 redo size

473 bytes sent via SQL*Net to client

385 bytes received via SQL*Net from client

2 SQL*Net roundtrips to/from client

0 sorts (memory)

0 sorts (disk)

1 rows processed

  

   

  1. 컬럼단위 Script 및 확인
  • 권한 부여

       

GRANT CREATE SESSION, CREATE ANY CONTEXT, CREATE PROCEDURE, CREATE TRIGGER, ADMINISTER DATABASE TRIGGER TO SCOTT;

권한이 부여되었습니다.

SQL> GRANT EXECUTE ON DBMS_SESSION TO scott;

권한이 부여되었습니다.

SQL> GRANT EXECUTE ON DBMS_RLS TO scott;

권한이 부여되었습니다.

   

  • Function 및 policy 추가

CREATE OR REPLACE FUNCTION hide_sal_comm (

v_schema IN VARCHAR2,

v_objname IN VARCHAR2)

   

RETURN VARCHAR2 AS

con VARCHAR2 (200);

begin

con := 'deptno=30';

RETURN (con);

END hide_sal_comm;

/

 

 

BEGIN

DBMS_RLS.ADD_POLICY (

object_schema => 'scott',

object_name => 'emp',

policy_name => 'hide_sal_policy',

policy_function => 'hide_sal_comm',

sec_relevant_cols => 'sal,comm');

END;

/

 

 

SELECT d.deptno, ENAME, d.dname, JOB , SAL

FROM emp e, dept d

WHERE d.deptno = e.deptno;

 

SELECT d.deptno, ENAME, d.dname, JOB , SAL

FROM emp e, dept d

3 WHERE d.deptno = e.deptno;

   

DEPTNO ENAME DNAME JOB SAL

---------- ---------- -------------- --------- ----------

30 ALLEN SALES SALESMAN 1600

30 WARD SALES SALESMAN 1250

30 MARTIN SALES SALESMAN 1250

30 BLAKE SALES MANAGER 2850

30 TURNER SALES SALESMAN 1500

30 JAMES SALES CLERK 950

   

6 rows selected.

   

   

Execution Plan

----------------------------------------------------------

Plan hash value: 568005898

   

----------------------------------------------------------------------------------------

| Id | Operation | Name | Rows | Bytes | Cost (%CPU)| Time |

----------------------------------------------------------------------------------------

| 0 | SELECT STATEMENT | | 5 | 170 | 4 (0)| 00:00:01 |

| 1 | NESTED LOOPS | | 5 | 170 | 4 (0)| 00:00:01 |

| 2 | TABLE ACCESS BY INDEX ROWID| DEPT | 1 | 13 | 1 (0)| 00:00:01 |

|* 3 | INDEX UNIQUE SCAN | PK_DEPT | 1 | | 0 (0)| 00:00:01 |

|* 4 | TABLE ACCESS FULL | EMP | 5 | 105 | 3 (0)| 00:00:01 |

----------------------------------------------------------------------------------------

   

Predicate Information (identified by operation id):

---------------------------------------------------

   

3 - access("D"."DEPTNO"=30)

4 - filter("DEPTNO"=30)

   

   

Statistics

----------------------------------------------------------

5 recursive calls

0 db block gets

10 consistent gets

0 physical reads

0 redo size

789 bytes sent via SQL*Net to client

385 bytes received via SQL*Net from client

2 SQL*Net roundtrips to/from client

0 sorts (memory)

0 sorts (disk)

6 rows processed

 

 

 

SELECT d.deptno, ENAME, d.dname, JOB

FROM emp e, dept d

WHERE d.deptno = e.deptno;

 

SELECT d.deptno, ENAME, d.dname, JOB

FROM emp e, dept d

WHERE d.deptno = e.deptno;

   

DEPTNO ENAME DNAME JOB

---------- ---------- -------------- ---------

20 SMITH RESEARCH CLERK

30 ALLEN SALES SALESMAN

30 WARD SALES SALESMAN

20 JONES RESEARCH MANAGER

30 MARTIN SALES SALESMAN

30 BLAKE SALES MANAGER

10 CLARK ACCOUNTING MANAGER

20 SCOTT RESEARCH ANALYST

10 KING ACCOUNTING PRESIDENT

30 TURNER SALES SALESMAN

20 ADAMS RESEARCH CLERK

30 JAMES SALES CLERK

20 FORD RESEARCH ANALYST

10 MILLER ACCOUNTING CLERK

   

14 rows selected.

   

   

Execution Plan

----------------------------------------------------------

Plan hash value: 351108634

   

----------------------------------------------------------------------------------------

| Id | Operation | Name | Rows | Bytes | Cost (%CPU)| Time |

----------------------------------------------------------------------------------------

| 0 | SELECT STATEMENT | | 14 | 420 | 4 (0)| 00:00:01 |

| 1 | NESTED LOOPS | | 14 | 420 | 4 (0)| 00:00:01 |

| 2 | TABLE ACCESS FULL | EMP | 14 | 238 | 3 (0)| 00:00:01 |

| 3 | TABLE ACCESS BY INDEX ROWID| DEPT | 1 | 13 | 1 (0)| 00:00:01 |

|* 4 | INDEX UNIQUE SCAN | PK_DEPT | 1 | | 0 (0)| 00:00:01 |

----------------------------------------------------------------------------------------

   

Predicate Information (identified by operation id):

---------------------------------------------------

   

4 - access("D"."DEPTNO"="DEPTNO")

   

   

Statistics

----------------------------------------------------------

4 recursive calls

0 db block gets

24 consistent gets

0 physical reads

0 redo size

980 bytes sent via SQL*Net to client

385 bytes received via SQL*Net from client

2 SQL*Net roundtrips to/from client

0 sorts (memory)

0 sorts (disk)

14 rows processed

 

   

exec DBMS_RLS.DROP_POLICY('scott','emp','hide_sal_policy');

 

BEGIN

DBMS_RLS.ADD_POLICY(

object_schema => 'scott',

object_name => 'emp',

policy_name => 'hide_sal_policy',

policy_function => 'hide_sal_comm',

sec_relevant_cols =>' sal,comm',

sec_relevant_cols_opt => dbms_rls.ALL_ROWS);

END;

/

 

SELECT ENAME, d.dname, job, sal, comm

FROM emp e, dept d

WHERE d.deptno = e.deptno;

 

 

SCOTT>

SCOTT> select ENAME, d.dname, job, sal, comm, d.deptno

2 from emp e, dept d

3 where d.deptno = e.deptno;

   

ENAME DNAME JOB SAL COMM DEPTNO

---------- -------------- --------- ---------- ---------- ----------

SMITH RESEARCH CLERK 20

ALLEN SALES SALESMAN 1600 300 30

WARD SALES SALESMAN 1250 500 30

JONES RESEARCH MANAGER 20

MARTIN SALES SALESMAN 1250 1400 30

BLAKE SALES MANAGER 2850 30

CLARK ACCOUNTING MANAGER 10

SCOTT RESEARCH ANALYST 20

KING ACCOUNTING PRESIDENT 10

TURNER SALES SALESMAN 1500 0 30

ADAMS RESEARCH CLERK 20

JAMES SALES CLERK 950 30

FORD RESEARCH ANALYST 20

MILLER ACCOUNTING CLERK 10

   

14 rows selected.

   

   

Execution Plan

----------------------------------------------------------

Plan hash value: 351108634

   

----------------------------------------------------------------------------------------

| Id | Operation | Name | Rows | Bytes | Cost (%CPU)| Time |

----------------------------------------------------------------------------------------

| 0 | SELECT STATEMENT | | 14 | 504 | 4 (0)| 00:00:01 |

| 1 | NESTED LOOPS | | 14 | 504 | 4 (0)| 00:00:01 |

| 2 | TABLE ACCESS FULL | EMP | 14 | 322 | 3 (0)| 00:00:01 |

| 3 | TABLE ACCESS BY INDEX ROWID| DEPT | 1 | 13 | 1 (0)| 00:00:01 |

|* 4 | INDEX UNIQUE SCAN | PK_DEPT | 1 | | 0 (0)| 00:00:01 |

----------------------------------------------------------------------------------------

   

Predicate Information (identified by operation id):

---------------------------------------------------

   

4 - access("D"."DEPTNO"="DEPTNO")

   

   

Statistics

----------------------------------------------------------

3 recursive calls

0 db block gets

24 consistent gets

0 physical reads

0 redo size

1129 bytes sent via SQL*Net to client

385 bytes received via SQL*Net from client

2 SQL*Net roundtrips to/from client

0 sorts (memory)

0 sorts (disk)

14 rows processed

   

   

   

   

  

   

   

   

   

   

   

'1. IT Story > DB' 카테고리의 다른 글

Oracle Golden Gate 개념 및 Installaton  (2) 2013.04.08
Oracle 11gR2 TDE(Transparent Data Encryption)  (0) 2013.03.31
Oracle 10g CRS 제거  (0) 2013.03.31
Oracle 11g RAC with ASM TEST  (2) 2013.03.31
Splunk install-Linux  (0) 2013.02.09
Big-Data Hive 환경구성(MySql)  (0) 2013.02.09
Big-Data Hadoop 환경구성  (0) 2013.02.09
Big-Data Hadoop기초  (0) 2013.02.09
블로그 이미지

운명을바꾸는자

IT와 함께 살아가는 삶

,



RedHat RPM install

   

If you are upgrading, review "How to upgrade Splunk" for instructions and migration considerations before proceeding.

To install the Splunk RPM in the default directory /opt/splunk:

rpm -i splunk_package_name.rpm

To install Splunk in a different directory, use the --prefix flag:

rpm -i --prefix=/opt/new_directory splunk_package_name.rpm

To upgrade an existing Splunk installation that resides in /opt/splunk using the RPM:

rpm -U splunk_package_name.rpm

To upgrade an existing Splunk installation that was done in a different directory, use the --prefix flag:

rpm -U --prefix=/opt/existing_directory splunk_package_name.rpm

Note: If you do not specify with --prefix for your existing directory, rpm will install in the default location of/opt/splunk.

For example, to upgrade to the existing directory of $SPLUNK_HOME=/opt/apps/splunk enter the following:

rpm -U --prefix=/opt/apps splunk_package_name.rpm

If you want to automate your RPM install with kickstart, add the following to your kickstart file:

./splunk start --accept-license
./splunk enable boot-start

Note: The second line is optional for the kickstart file.

   

Debian DEB install

To install the Splunk DEB package:

dpkg -i splunk_package_name.deb

Note: You can only install the Splunk DEB package in the default location, /opt/splunk.

   

Tar File Install

To install Splunk on a Linux system, expand the tarball into an appropriate directory using the tar command:

tar xvzf splunk_package_name.tgz

The default install directory is splunk in the current working directory. To install into /opt/splunk, use the following command:

tar xvzf splunk_package_name.tgz -C /opt

Note: When you install Splunk with a tarball:

  • Some non-GNU versions of tar might not have the -C argument available. In this case, if you want to install in /opt/splunk, either cd to /opt or place the tarball in /opt before running the tar command. This method will work for any accessible directory on your machine's filesystem.
  • Splunk does not create the splunk user automatically. If you want Splunk to run as a specific user, you must create the user manually before installing.
  • Ensure that the disk partition has enough space to hold the uncompressed volume of the data you plan to keep indexed.

       

  • Tar File Install를 형태로 설치진행

    -> 해당 리눅스 비트에 맞는 tgz 파일 다운(http://www.splunk.com/download?ac=get_splunk_download)

    -> /opt 해당 위치에 압축을 풀어주시면 되겠습니다.

       

    What gets Installed

       

    Splunk package status:

    dpkg --status splunk

    List all packages:

    dpkg --list

       

    Start Splunk

       

    Splunk can run as any user on the local system. If you run Splunk as a non-root user, make sure that Splunk has the appropriate permissions to read the inputs that you specify. Refer to the instructions for running Splunk as a non-root user for more information.

    To start Splunk from the command line interface, run the following command from $SPLUNK_HOME/bin directory (where $SPLUNK_HOME is the directory into which you installed Splunk):

    ./splunk start

    By convention, this document uses:

  • $SPLUNK_HOME to identify the path to your Splunk installation.
  • $SPLUNK_HOME/bin/ to indicate the location of the command line interface.

       

    Startup options

    The first time you start Splunk after a new installation, you must accept the license agreement. To start Splunk and accept the license in one step:

    $SPLUNK_HOME/bin/splunk start --accept-license

    Note: There are two dashes before the accept-license option.

       

    Launch Splunk Web and log in

    After you start Splunk and accept the license agreement,

    1. In a browser window, access Splunk Web at http://<hostname>:port. --해당 방식으로 Splunk 접속완료

    hostname is the host machine.

  • port is the port you specified during the installation (the default port is 8000).

    2. Splunk Web prompts you for login information (default, username admin and password changeme) before it launches. If you switch to Splunk Free, you will bypass this logon page in future sessions. --처음 유저명과 패스워드의 경우 위와 동일

    Now that you've installed Splunk, what comes next?

       

    RedHat Linux

    To uninstall from RedHat Linux

    rpm -e splunk_product_name

       

    Debian Linux

    To uninstall from Debian Linux:

    dpkg -r splunk

    To purge (delete everything, including configuration files):

    dpkg -P splunk

       

    원본 위치 <http://docs.splunk.com/Documentation/Splunk/latest/Installation/InstallonLinux>


'1. IT Story > DB' 카테고리의 다른 글

Oracle 11gR2 TDE(Transparent Data Encryption)  (0) 2013.03.31
Oracle 10g CRS 제거  (0) 2013.03.31
Oracle 11g RAC with ASM TEST  (2) 2013.03.31
Oracle VPD(Virtual Private Database)  (0) 2013.03.31
Big-Data Hive 환경구성(MySql)  (0) 2013.02.09
Big-Data Hadoop 환경구성  (0) 2013.02.09
Big-Data Hadoop기초  (0) 2013.02.09
Oracle RAC 11.2.0.3 with ASM / Vmware Oracle DB 설치  (0) 2013.02.09
블로그 이미지

운명을바꾸는자

IT와 함께 살아가는 삶

,



Hadoop-Hive

   

  • Hadoop 의 확장성과 가용성은 이해할 수 있었으나 자바 맵리듀스 프로그램을 작성하는 것과 복잡성 때문에 광범위한 채택에 대한 해결책이 필요

       

  • 기술자와 분석가는 SQL 쿼리 방식의 도구를 생각, 대부분의 개발자들이 PHP와 파이선 같은 스크립트 언어에 익숙 
  • 익숙한 언어와 hadoop 프로그램이 요구하는 언어의 차이를 줄일 수 있는 소프트웨어를 개발하는 것이 필수
  • 데이터가 테이블과 파티션으로서 표현될 수 있고 쿼리와 분석을 위해 SQL같은 언어 또한 사용할 수 있는 시스템. 사용자가 선택한 프로그래밍 언어로 작성된 맵리듀스 프로그램을 쿼리에 연결할 수 있는 능력. --> HIVE 사용.

       

    - 하이브는 하둡의 최상위에 구축된 데이터 웨어하우스 기반구조이다. 

    - 페이스북의 하둡에 저장된 데이터에 쿼리하기 위해 사용될 수 있는 도구.

       

       

       

       

Mysql Installation on Ubuntu

   

  • Mysql 설치

Apt-get Install mysql-server mysql-client

   

  • MySQL 서비스 상태 확인

   

  • MySQL 서비스 재시작
  • /etc/init.d/mysql restart

       

MySQL에 Hive 계정 생성(default)

Mysql -uroot -p

   

  • 데이터베이스 생성

Mysql> create database metastore_db;

Mysql> use mysql

   

  • 계정만들기

Mysql> insert into user(host,user,password) values('%', 'hive', password('hive'));

Mysql> Flush privileges;

Mysql> grant all privileges on metastore_db.* to hive@localhost identified by 'hive' with grant option;

Mysql> Flush privileges;

Mysql> grant all privileges on metastore_db.* to hive@'%' identified by 'hive' with grant option;

Mysql> Flush privileges;

   

  • Hive 다운로드 및 설치

>wget http://archive.cloudera.com/cdh/3/hive-0.7.1-cdh3u5.tar.gz

>tar xvzf hive-0.7.1-cdh3u5.tar.gz

>wget http://repo.maven.apache.org/maven2/mysql/mysql-connector-java/5.1.9/mysql-connector-java-5.1.9.jar

>cp mysql-connector-java-5.1.9.jar ./hive-0.7.1-cdh3u5/lib

   

  • Hive 설정
     
    • HIVE_HOME/conf/hive-default.xml

   

<property>

<name>hive.metastore.local</name>

<value>true</value>

</property>

<property>

<name>javax.jdo.option.ConnectionURL</name>

<value>jdbc:mysql://localhost:3306/metastore_db?createDatabaseIfNotExist=true</value>

</property>

<property>

<name>javax.jdo.option.ConnectionDriverName</name>

<value>com.mysql.jdbc.Driver</value>

<description>Driver class name for a JDBC metastore</description>

</property>

<property>

<name>javax.jdo.option.ConnectionUserName</name>

<value>hive</value>

</property>

<property>

<name>javax.jdo.option.ConnectionPassword</name>

<value>[패스워드 설정]</value>

</property>

   

  • HADOOP_HOME/conf/core-site.xml

<configuration>

<property>

<name>fs.default.name</name>

<value>hdfs://hadoop-virtualbox:9000</value>

</property>

</configuration>

   

Hive 디렉터리 생성

<HADOOP_HOME>/bin/hadoop fs -mkdir /tmp

<HADOOP_HOME>/bin/hadoop fs -mkdir /user/hive/warehouse

<HADOOP_HOME>/bin/hadoop fs -chmod g+w /tmp

<HADOOP_HOME>/bin/hadoop fs -chmod g+w /user/hive/warehouse

   

데이터 다운로드

• wget http://www.grouplens.org/sites/www.grouplens.org/external_files/data/ml-10m.zip

• wget http://www.grouplens.org/system/files/ml-1m.zip

• unzip ml-1m.zip

• cd ml-1m

   

HIVE Command

hive> CREATE TABLE rating(userid INT, movieid INT, rating FLOAT, ds STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE;

   

hive> show tables;

   

hive> LOAD DATA LOCAL INPATH '/home/hadoop/ml-1m/ratings.dat' OVERWRITE INTO TABLE rating;

   

hive> select count(*) from rating r;

   

hive> select r.userid from rating r where r.userid = '41810';

   

[참고자료]

거침없이 배우는 하둡(Hadoop IN Action)

http://hadoopdb.sourceforge.net/guide/

http://hadoop.apache.org/docs/r0.20.2/hdfs_design.html

http://www.ibm.com/developerworks/kr/library/

http://bradhedlund.com/2011/09/10/understanding-hadoop-clusters-and-the-network/

   

   

   

블로그 이미지

운명을바꾸는자

IT와 함께 살아가는 삶

,



Oracle VM-UBUNTU

   

  • 새로 만들기(유분투)

       

   

   

   

  • 메모리(2048MB) 설정

       

   

   

   

   

   

  • 유분투 설치

       

       

       

       

       

  • 게스트 확장 설치

       

       

  • 공유폴더 지정

       

       

  • Root 유저 사용을 위한 Passwd 변경

       

       

       

(Root 유저)

   

Mkdir -p /media/shared

   

Mount -t vboxsf shared /media/shared

   

Vi /etc/init.d/rc.local

   

Mount -t vboxsf shared /media/shared

   

UBUNTU Telnet 서버스 설치

  • Hadoop 설정시 Telnet 과 SSH가 열려 있어야 합니다.

       

    • Telnet 패키지 설치 (root 유저)

Apt-get install xinetd

Apt-get install telnetd

   

  • Vi /etc/xinetd.conf

       

   

service telnet

   

{

disable = no

flags = REUSE

socket_type = stream

wait = no

user = root

server = /usr/sbin/in.telnetd

log_on_failure += USERID

   

}

   

/etc/init.d/xinetd restart

Telnet restart

   

   

  • Telnet 접속확인

Telnet localhost

   

   

  • Network 설정

     

     

       

       

       

       

       

UBUNTU SSH 설치

   

  • SSH Pkg 설치

Apt-get install openssh-server openssh-client

  • Key 생성

Ssh-keygen -t rsa -P ""

   

   

  • 인증키 복사

Cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys

   

자바설치

   

  • 설치파일

Chmod +x jdk-6u38-linux-i586.bin

./jdk-6u38-linux-i586.bin

   

update-alternatives --install /usr/bin/javac javac /usr/lib/jvm/bin/javac 1

update-alternatives --install /usr/bin/java java /usr/lib/jvm/bin/java 1

update-alternatives --install /usr/bin/javaws javaws /usr/lib/jvm/bin/javaws 1

   

  • Java version 확인

Java -version

   

   

   

Hadoop 설치 - Pseudo mode

  • Local Mode, Pseudo Mode, Cluster Mode 중 Pseudo Mode 설치

       

  • 설치파일

Tar xvzf ./hadoop-0.20.2-cdh3u5.tar.gz

   

  • Hadoop psuedo-mode 설정
  • Home/hadoop/hadoop-0.20.2-cdh3u5/conf

       

  • Core-site.xml : namenode

<configuration>

<property>

<name>fs.default.name</name>

<value>hdfs://localhost:9000</value>

</property>

</configuration>

~

  • Hdfs-site.xml : HDFS 관련 설정, 복제경로

<configuration>

<property>

<name>dfs.replication</name>

<value>1</value>

</property>

</configuration>

  • Mapred-site.xml : Job Tracker 위치 및 Task 정보

   

<configuration>

<property>

<name>mapred.job.tracker</name>

<value>hdfs://localhost:9001</value>

</property>

</configuration>

  • Hadoop-env.sh : 하둡 데몬관련 환경변수

export JAVA_HOME=/usr/lib/jvm

   

  • 환경변수 설정(.profile)

export JAVA_HOME=/usr/lib/jvm

export PATH=$PATH:$JAVA_HOME/bin

export HADOOP_HOME=/home/hadoop/hadoop-0.20.2-cdh3u5

   

Hadoop 구동(/home/hadoop/hadoop-0.20.2-cdh3u5)

  • Hadoop namenode -format

       

Hadoop 실행 및 프로세스 확인

  • ./start-all.sh
  • /usr/lib/jvm/bin/Jps

  • 기타 명령어
    • Hadoop fs -ls
    • Hadoop fs -mkdir
    • Hadoop fs -put
    • Hadoop fs -rmr
    • Hadoop fs -cat
    • Hadoop fs -getmerge

         

예제 1 WordCount

   

  • 디렉터리 생성 및 분석 파일 업로드

  • MapReduce 실행

       

  • 결과 확인

       

       

       

       

  • Monitoring

       

    NameNode : 50070

    DataNode : 50075

    Secondary NameNode : 50090

    Backup / Checkpoint Node : 50105

    JobTracker : 50030

    TaskTracker : 50060

       

       

[참고자료]

거침없이 배우는 하둡(Hadoop IN Action)

http://hadoopdb.sourceforge.net/guide/

http://hadoop.apache.org/docs/r0.20.2/hdfs_design.html

http://www.ibm.com/developerworks/kr/library/

http://bradhedlund.com/2011/09/10/understanding-hadoop-clusters-and-the-network/

   

블로그 이미지

운명을바꾸는자

IT와 함께 살아가는 삶

,


Big Data Vs RDBMS

   

  • 정형화된 비즈니스적인 중요 데이터의 경우 RDBMS 사용
  • RDBMS: select -> 처리 -> 보내기 / Big-Data : 작동방식(function)-> 처리 -> 처리완료
  • 개발 방식의 변화 OOP(Object-Oriented Programming) -> FP(Functional Programming)
  • 비즈니스적으로는 중요하지만, 분석을 통하여 새로운 정보를 추출하는 경우 Big-Data 사용
  • 결론적으로, RDBMS와 Big-Data는 적절한 업무 및 환경 파악을 통하여 효율적인 구축이 필요합니다. ( 각 업무와 환경에 맞는 DB를 선택적으로 선별하여 사이트구축, 정형화되고 표준화된 Database -> 비정형화되고 다양한 형태의 Database 형태로 변화하고 있습니다)

       

데이터 분석 순서

  • 데이터 수집 (Flume, Scribe, Chukwa)
  • 데이터 저장 ( HDFS)
  • 트렌젝션 데이터저장 (NO-SQL)
  • 실시간 분석 (S4, Storm, OEP)
  • 배치 분석 (Map/Reduce)
  • 데이터 마이닝/통계 ( Mahout R)
  • 클러스터 관리 (zookeeper, HIVE)
  • 데이터 직렬화 (Avro 등)

   

HaDoop Distributed File System

   

  • HDFS 의 장단점
    • 장점 : X86 장비 내부 로컬디스크, 수천개의 대규모 클러스터, 고가용성(1-5000), 장애시 자동감시 및 복구, 서버 추가 및 재설치 불필요, 분석용 데이터 저장가능
    • 단점 : 저장가능한 파일수의 한계, Namenode의 SPOF, 범용스토리지를 사용할 경우의 제약
      • 단일 고장점(single point of failure, SPOF)는 시스템 구성 요소 중에서, 동작하지 않으면 전체 시스템이 중단되는 요소를 말한다.

           

  • 데이터 저장 방식
    • NameNode에서 해당 데이터의 위치정보를 따라 Client를 통해 DataNode에 내려가게 되면, 해당 데이터는 복제(default: 3) 되어 DataNode에 저장됩니다.
    • DataNode가 사용할 수 없게 될 경우, NameNode에서는 해당데이터와 같은 내용이 복제되어 저장된 DataNode의 메타정보를 통하여 Read & Write를 진행하고 다시 한번 더 복제가 발생하여 보관됩니다.

   

   

Name Node

   

  • NameNode: 실데이터 DataNode 분산저장, 각 데이터 위치에 대한 메타정보 및 변경된 내역에 대한 Log정보를 가지고 있습니다.(기본 64MB, 복제: 3), 메모리 상에 올려놓고 각 데이터의 위치정보를 주는 역할과 변경된 내용을 저장하는 역할을 담당합니다.
  • Fsimage : 각 데이터의 위치정보, 메타정보를 가지고 있어서 쓰기 및 읽을 경우, 반드시 Name Node를 통하여 데이터를 이용할 수 있습니다.(메타정보관리)
  • Edit logs: DataNode에서 수정 및 반영된 모든 내용을 Edit log에 기록하는 역할을 담당합니다.(RDBMS의 Redolog와 비슷한 역할)
  • Namenode의 경우, 그 역활이 중요하기 때문에 SecondaryNameNode를 통하여 실시간(5분)으로 병합 및 동기화를 통하여 혹시나 발생할 수 있는 장애에 대비해야 합니다.
  • 1개의 NameNode에 대략 5000개의 DataNode 클러스터를 구성할 수 있습니다.

   

SecondaryNameNode

   

  • SecondaryNamenode의 경우, NameNode의 Fsimage(메타정보) 백업 및 edit log 정보를 병합 및 동기화하는 역할을 수행합니다.
  • NameNode에 장애가 발생할 경우, 백업된 메타정보와, 병합 및 동기화된 edit log 정보를 통하여 IP및 DNS정보를 변경하여 SecondaryNamenode-> NameNode로 변경하여 장애처리가 가능합니다.
  • Edit log가 많이 쌓이고 SecondaryNamenode가 없을 경우, 해당 로그정보들이 모두 처리되기 전까지 구동되지 않는 경우가 발생할 수 있습니다. 또한 수행 속도의 저하 및 메모리 낭비현상도 발생할 수 있습니다.

       

HDFS Client

   

  • HDFS Client의 경우 NameNode와 DataNode 사이에서 데이터의 Read와 Write를 조율해주는 역할을 해줍니다.
  • Client에서 데이터를 요청했을 경우, NameNode에서 먼저 해당 데이터에 대한 메타정보를 HDFS Client에게 보내주게 되고, 해당 정보를 바탕으로 각 DataNode에 분산되어 복제되어 있는 데이터를 가져오는 형태로 진행됩니다.

       

Data Node

   

   

  • Data Node의경우 실 데이터가 보관되어 있는 node로 데이터가 저장될 경우, 복제(default :3)가 발생하여 위 그림과 같이 각기 다른 노드에 복제되어 저장되어 있게 됩니다. 복제된 위치정보의 경우 NameNode에 의하여 관리되게 됩니다.
  • DataNode는 HeartBeat를 NameNode에게 날리면서 자신이 살아있음을 지속적으로 표현하고 혹시나 DataNode가 사용할 수 없게 될 경우 복제된 다른 노드에서 데이터를 가지고 올 수 있도록 합니다.
  • 결론적으로 NameNode의 메타정보가 분실될 경우, 데이터를 사용할 수 없게 됩니다.

       

MapReduce

  • 평균 계산 및 분산처리 결과 추출(병렬)시 뛰어난 성능을 발휘 합니다. 단, Sorting시 어려운 점이 있습니다.
  • Key-Value 형태로 데이터를 정리하고(Mapping) 각 데이터별로 Sorting을 통하여 (Shuffling) 핵심 데이터로 줄여서 (Reducing) 그 결과를 사용자에게 제공합니다.
  • Map/Reduce Job 을 구성한 클래스
    • Reducer
      • Mapper의 출력을 읽어 처리
      • 입력 Mapper의 Key와 Value 목록, 출력 Key-Value
    • Mapper
      • InputSplit에서 파일을 읽어서 처리
    • Driver
      • 각종 파일, 파라미터 처리
      • Hadoop Map/Reduce Job 실행
    • Combiner
      • Mapper에서 동작하는 Minireducer(성능 개선 시 추가되는 Class)
    • Partitioner
      • Mapper의 Key를 어느 Reducer에 보낼지 처리(확장 개선 시 추가되는 Class)
    • InputFormat / RecordReader : 입력파일을 읽어들이는 역할

       

   

Data Read 및 Write 과정

   

  • Client Data Read -> Namenode에서 해당 Data의 메타정보를 -> HDFS Client에게 전송 -> Map/Reduce -> Job Client에 Job을 제출(XML파일) -> JobTracker -> TaskTracker -> DataNode(파이프라인) -> 데이터 찾기 완료 -> Client에게 ACK 전송(완료)
  • Client Data Write -> 반대로 -> Name Node 메모리와 Log에 변경내용 저장
  • Hadoop(Big-data)의 경우, Update 및 데이터 변경이 어렵습니다.

   

   

[참고자료]

거침없이 배우는 하둡(Hadoop IN Action)

http://hadoopdb.sourceforge.net/guide/

http://hadoop.apache.org/docs/r0.20.2/hdfs_design.html

http://www.ibm.com/developerworks/kr/library/

http://bradhedlund.com/2011/09/10/understanding-hadoop-clusters-and-the-network/

   

   

   

블로그 이미지

운명을바꾸는자

IT와 함께 살아가는 삶

,


Oracle RAC DBCA

  • Oracle 유저로 실행

   

   

   

   

   

  • Global SID 설정

   

   

   

   

  • ASM DISK GROUP 설정한 내역으로 저장위치 선택

   

   

   

   

   

   

설치완료후, 확인사항

[root@rac1 test]# crsctl stat res -t

--------------------------------------------------------------------------------

NAME TARGET STATE SERVER STATE_DETAILS

--------------------------------------------------------------------------------

Local Resources

--------------------------------------------------------------------------------

ora.DATA.dg

ONLINE ONLINE rac1

ONLINE ONLINE rac2

ora.LISTENER.lsnr

ONLINE ONLINE rac1

ONLINE ONLINE rac2

ora.ORADATA.dg

ONLINE ONLINE rac1

ONLINE ONLINE rac2

ora.asm

ONLINE ONLINE rac1 Started

ONLINE ONLINE rac2 Started

ora.gsd

OFFLINE OFFLINE rac1

OFFLINE OFFLINE rac2

ora.net1.network

ONLINE ONLINE rac1

ONLINE ONLINE rac2

ora.ons

ONLINE ONLINE rac1

ONLINE ONLINE rac2

ora.registry.acfs

ONLINE ONLINE rac1

ONLINE ONLINE rac2

--------------------------------------------------------------------------------

Cluster Resources

--------------------------------------------------------------------------------

ora.LISTENER_SCAN1.lsnr

1 ONLINE ONLINE rac1

ora.cvu

1 ONLINE ONLINE rac1

ora.oc4j

1 ONLINE ONLINE rac1

ora.rac.db

1 ONLINE ONLINE rac1 Open

2 ONLINE ONLINE rac2 Open

ora.rac1.vip

1 ONLINE ONLINE rac1

ora.rac2.vip

1 ONLINE ONLINE rac2

ora.scan1.vip

1 ONLINE ONLINE rac1

[root@rac1 test]#

   

[root@rac1 test]# crsctl check crs

CRS-4638: Oracle High Availability Services is online

CRS-4537: Cluster Ready Services is online

CRS-4529: Cluster Synchronization Services is online

CRS-4533: Event Manager is online

   

[root@rac1 test]# olsnodes -n

rac1 1

rac2 2

[root@rac1 test]#

   

[root@rac1 test]# ocrcheck

Status of Oracle Cluster Registry is as follows :

Version : 3

Total space (kbytes) : 262120

Used space (kbytes) : 3008

Available space (kbytes) : 259112

ID : 1715847047

Device/File Name : +DATA

Device/File integrity check succeeded

   

Device/File not configured

   

Device/File not configured

   

Device/File not configured

   

Device/File not configured

   

Cluster registry integrity check succeeded

   

Logical corruption check succeeded

   

[root@rac1 test]#

   

[root@rac1 test]# crsctl query css votedisk

## STATE File Universal Id File Name Disk group

-- ----- ----------------- --------- ---------

1. ONLINE 6d2c92cdd51f4fabbfe439c297f14698 (ORCL:OCR_VOTE01) [DATA]

2. ONLINE 12637439cc644fdcbf573004cbe18474 (ORCL:OCR_VOTE02) [DATA]

3. ONLINE 382c20ff00364f04bffcf688406e6e31 (ORCL:OCR_VOTE03) [DATA]

Located 3 voting disk(s).

[root@rac1 test]#

   

   

[참고문헌]

   

http://www.allsoft.co.kr/bbs/board.php?bo_table=study91_2&wr_id=30

http://hook.tistory.com/275

http://blog.naver.com/jjuneye?Redirect=Log&logNo=110152112359

http://estenpark.tistory.com/284

http://www.oracle.com/pls/db112/homepage

http://pangsun.tistory.com/

http://www.oracleclub.com/

https://support.oracle.com

블로그 이미지

운명을바꾸는자

IT와 함께 살아가는 삶

,