This post is a quick reference on how to set up several mainstream DBMSs on macOS (tested on macOS 10.12, 10.13, and 10.14). All the code snippets are meant to be run directly in bash, so there is no need to switch back and forth between a shell interpreter and a text editor.

Each code snippet is divided into up to six parts: Install, Configure, Initialize, Start, Connect, and Stop.

All related data is stored under /db_data/.

System Prerequisites

# Set up passphraseless ssh
sudo systemsetup -setremotelogin on
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 0600 ~/.ssh/authorized_keys
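# Optional check: should run without prompting for a password
ssh -o BatchMode=yes -o StrictHostKeyChecking=no localhost true && echo "ssh OK"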

# Configure system kernel state
sudo tee /etc/sysctl.conf << EOF
kern.sysv.shmmax=2147483648
kern.sysv.shmmin=1
kern.sysv.shmmni=64
kern.sysv.shmseg=16
kern.sysv.shmall=524288
kern.maxfiles=65535
kern.maxfilesperproc=65536
kern.corefile=/cores/core.%N.%P
EOF
# Apply the settings immediately
xargs sudo sysctl < /etc/sysctl.conf
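# Optional check: confirm a couple of the values took effect
sysctl kern.sysv.shmmax kern.maxfiles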

# Create the data folder, owned by the current user
sudo install -o $USER -d /db_data/

HDFS

# Install
brew install hadoop

# Configure
export HADOOP_HOME=/usr/local/opt/hadoop/libexec
tee $HADOOP_HOME/etc/hadoop/core-site.xml << EOF
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://localhost:8020</value>
    </property>
    <property>
        <name>hadoop.proxyuser.$USER.hosts</name>
        <value>*</value>
    </property>
    <property>
        <name>hadoop.proxyuser.$USER.groups</name>
        <value>*</value>
    </property>
</configuration>
EOF
tee $HADOOP_HOME/etc/hadoop/hdfs-site.xml << EOF
<configuration>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:///db_data/hdfs/name</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:///db_data/hdfs/data</value>
    </property>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
</configuration>
EOF
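# Optional check: confirm Hadoop picks up the configured filesystem URI
hdfs getconf -confKey fs.defaultFS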

# Initialize
install -d /db_data/hdfs/name
install -d /db_data/hdfs/data
hdfs namenode -format

# Start
/usr/local/opt/hadoop/sbin/start-dfs.sh

# Connect
hdfs dfsadmin -report
hdfs dfs -ls /
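## Optional smoke test: round-trip a small file (example path, adjust as needed)
echo hello | hdfs dfs -put - /smoke_test.txt
hdfs dfs -cat /smoke_test.txt
hdfs dfs -rm /smoke_test.txt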

# Stop
/usr/local/opt/hadoop/sbin/stop-dfs.sh

OushuDB

# Install
brew tap chiyang10000/tap
brew search chiyang10000/tap
brew install oushudb

# Configure
tee /usr/local/opt/oushudb/etc/hawq-site.xml << EOF
<configuration>
    <property>
        <name>hawq_dfs_url</name>
        <value>localhost:8020/hawq_default</value>
        <description>URL for accessing HDFS.</description>
    </property>
    <property>
        <name>hawq_master_address_host</name>
        <value>localhost</value>
    </property>
    <property>
        <name>hawq_master_address_port</name>
        <value>5432</value>
    </property>
    <property>
        <name>hawq_segment_address_port</name>
        <value>40000</value>
    </property>
    <property>
        <name>hawq_master_directory</name>
        <value>/db_data/hawq-data-directory/masterdd</value>
    </property>
    <property>
        <name>hawq_segment_directory</name>
        <value>/db_data/hawq-data-directory/segmentdd</value>
    </property>
    <property>
        <name>hawq_master_temp_directory</name>
        <value>/tmp</value>
    </property>
    <property>
        <name>hawq_segment_temp_directory</name>
        <value>/tmp</value>
    </property>
    <property>
        <name>hawq_magma_port_master</name>
        <value>50000</value>
    </property>
    <property>
        <name>hawq_magma_port_segment</name>
        <value>50005</value>
    </property>
    <property>
        <name>hawq_magma_locations_master</name>
        <value>file:///db_data/hawq-data-directory/magma_master</value>
    </property>
    <property>
        <name>hawq_magma_locations_segment</name>
        <value>file:///db_data/hawq-data-directory/magma_segment</value>
    </property>
    <property>
        <name>hawq_init_with_hdfs</name>
        <value>true</value>
        <description>Choose whether to initialize the hawq cluster with HDFS.</description>
    </property>
    <property>
        <name>default_table_format</name>
        <value>appendonly</value>
        <description>Default table format when creating a table.</description>
    </property>
</configuration>
EOF

# Initialize
# Remove artifacts from any previous initialization
rm -rf /opt/dependency*
rm -rf /db_data/hawq-data-directory
install -d /db_data/hawq-data-directory/masterdd
install -d /db_data/hawq-data-directory/segmentdd
install -d /db_data/hawq-data-directory/magma_master
install -d /db_data/hawq-data-directory/magma_segment
source /usr/local/opt/oushudb/greenplum_path.sh
hawq init cluster -a

# Start
source /usr/local/opt/oushudb/greenplum_path.sh
hawq start cluster -a
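## Optional check: report the status of the master and segment
## (assumes the HAWQ-compatible "state" subcommand is available in OushuDB)
hawq state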

# Connect
source /usr/local/opt/oushudb/greenplum_path.sh
psql -d postgres
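## Optional sanity query
psql -d postgres -c 'SELECT version();'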

# Stop
source /usr/local/opt/oushudb/greenplum_path.sh
hawq stop cluster -a

Hive

Note that Hive only supports Java 8 or older.
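
If multiple JDKs are installed, something like the following pins the current shell to Java 8 before running any Hive command (a minimal sketch, assuming a Java 8 JDK is present):

# Point JAVA_HOME at a Java 8 JDK for this shell session
export JAVA_HOME=$(/usr/libexec/java_home -v 1.8)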

# Install
brew install hive

# Configure
export HIVE_HOME=/usr/local/opt/hive/libexec
tee $HIVE_HOME/conf/hive-site.xml << EOF
<configuration>
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:derby:;databaseName=/db_data/hive_metastore_db;create=true</value>
  </property>
  <property>
    <name>hive.server2.thrift.port</name>
    <value>10000</value>
  </property>
  <property>
    <name>hive.server2.enable.doAs</name>
    <value>false</value>
  </property>
</configuration>
EOF

# Initialize
hadoop fs -mkdir       /tmp
hadoop fs -mkdir -p    /user/hive/warehouse
hadoop fs -chmod g+w   /tmp
hadoop fs -chmod g+w   /user/hive/warehouse
schematool -dbType derby -initSchema
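## Optional check: report the metastore schema version just created
schematool -dbType derby -info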

# Start
## Run both services in the background so they can share one shell
hive --service hiveserver2 &
hive --service metastore &

# Connect
## Use the Hive CLI directly (talks to the metastore, bypassing hiveserver2)
hive
## Connect to hiveserver2
## Wait until hiveserver2 has fully started, otherwise the connection will be refused
beeline -n $USER -u jdbc:hive2://127.0.0.1:10000

# Stop
killall hive

PostgreSQL

# Install
brew install postgresql

# Configure/Initialize
initdb -D /db_data/postgresql/

# Start
pg_ctl -D /db_data/postgresql/ start

# Connect
psql -d postgres
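## Optional smoke test: create, query, and drop a throwaway table (example name)
psql -d postgres -c 'CREATE TABLE smoke_test(i int); INSERT INTO smoke_test VALUES (1); SELECT * FROM smoke_test; DROP TABLE smoke_test;'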

# Stop
pg_ctl -D /db_data/postgresql/ stop

MySQL

# Install
brew install mysql

# Configure/Initialize
mysqld --initialize-insecure --datadir=/db_data/mysql/

# Start
## Run in the background; the Stop step below uses killall
mysqld --datadir=/db_data/mysql/ &

# Connect
mysql -u root -D mysql
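## Optional smoke test: run a quick query non-interactively
mysql -u root -e 'SELECT VERSION();'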

# Stop
killall mysqld