{{pasted:20251229-044558.png}}
# Load packages: tidyverse for the pipe, lavaan for lav_matrix_lower2full().
library(tidyverse)
library(lavaan)

# Build a full 4 x 4 correlation matrix from its lower triangle.
# Variables: three predictors (X1-X3) and one outcome (Y).
cormat <- lav_matrix_lower2full(c(1,
                                  0.418613, 1,
                                  0.4153223, 0.2652651, 1,
                                  0.2670484, 0.4315334, 0.2709399, 1))
colnames(cormat) <- rownames(cormat) <- c("X1", "X2", "X3", "Y")
cormat

# Companion summary table: sample size (n = 254 for every variable) in the
# first row, variable means in the second.
# FIX: `<-` instead of `=` for assignment, TRUE instead of the reassignable T.
n_mean <- matrix(c(rep(254, 4), 4.066, 4.024, 4.033, 3.919),
                 ncol = 4, nrow = 2, byrow = TRUE)
n_mean

# Label rows/columns so the table is self-describing when printed.
n_mean <- n_mean %>%
  `colnames<-`(c("X1", "X2", "X3", "Y")) %>%
  `rownames<-`(c("n", "mean"))
n_mean
# Install the MASS package if not already installed
# install.packages("MASS")
# MASS provides mvrnorm() for multivariate-normal simulation.
library(MASS)
library(lavaan)

# 01 Data entry ----
# Variable meanings:
#   AC: individual's achievement of success
#   IN: household income
#   AB: individual's ability
#   SU: individual's passion for success

# 1. Define the mean vector (for the 4 variables above).
mns <- c(100, 120, 150, 130)

# 2. Define the covariance matrix (must be symmetric and positive-definite),
#    expanded here from its lower triangle into a full 4 x 4 matrix.
cv_mat <- lav_matrix_lower2full(
  c(26.5,
    21.5, 37.9,
    23.5, 25.2, 41.7,
    16.5, 14.6, 14.5, 15.8))
# BUG FIX: the original labelled a non-existent object `cvMat`;
# the matrix created above is `cv_mat`.
colnames(cv_mat) <- rownames(cv_mat) <- c("AC", "IN", "AB", "SU")

# 3. Simulate the data (300 observations) and round to whole numbers.
n_observations <- 300
sim_d <- mvrnorm(n = n_observations, mu = mns, Sigma = cv_mat)
sim_data <- round(sim_d, 0)

# View the first few rows and structure of the simulated data.
colnames(sim_data) <- c("AC", "IN", "AB", "SU")
head(sim_data)
str(sim_data)
# Path model with labelled regression paths:
#   SU regressed on AB (label a) and IN (label b);
#   AC regressed on SU (label c) and AB (label d);
#   AB ~~ IN lets the two exogenous predictors covary.
PathModel <- '
SU ~ a*AB +b*IN
AC ~ c*SU + d*AB
AB ~~ IN
'

# 03 Run the analysis with sem() ----
# sem(model, sample.cov = covariance matrix, sample.nobs = number of cases)
# Alternative: fit from the covariance matrix directly, e.g.
# fitM <- sem(PathModel, sample.cov = cv_mat, sample.nobs = 300,
#             fixed.x = FALSE)
fitM <- sem(PathModel, data = sim_data)

# 04 Inspect results ----
# FIX: rsquare = TRUE (not the reassignable T), consistent spacing.
summary(fitM, standardized = TRUE, fit.measures = TRUE, rsquare = TRUE)

# install.packages("semPlot")
library(semPlot)

# Path diagram with standardized estimates; rotation = 2 places
# exogenous variables on the left.
semPaths(fitM, whatLabels = "std", curveAdjacent = TRUE,
         style = "lisrel",
         intAtSide = TRUE, intercepts = TRUE, rotation = 2,
         layout = "tree3",
         edge.label.cex = 1,
         edge.label.position = 1, edge.color = "black",
         color = list(lat = "tomato", man = "grey"),
         curvature = 3,
         exoVar = TRUE, exoCov = TRUE)
# Bollen (1989) political-democracy data shipped with lavaan.
data(PoliticalDemocracy)
PoliticalDemocracy

# Classic three-factor SEM: industrialization in 1960 (ind60) and
# democracy in 1960/1965 (dem60, dem65), with correlated residuals
# among indicators measured at both occasions.
pd_model <- "
# measurement model
ind60 =~ x1 + x2 + x3
dem60 =~ y1 + y2 + y3 + y4
dem65 =~ y5 + y6 + y7 + y8
# regressions
dem60 ~ ind60
dem65 ~ ind60 + dem60
# residual correlations
y1 ~~ y5
y2 ~~ y4 + y6
y3 ~~ y7
y4 ~~ y8
y6 ~~ y8
"

# Estimate the model and show the full summary.
fit_pd <- sem(model = pd_model, data = PoliticalDemocracy)
summary(fit_pd, fit.measures = TRUE, standardized = TRUE)
# Packages for SEM fitting, path diagrams, and table/EDA helpers.
library(lavaan)
library(semPlot)
library(OpenMx)
library(tidyverse)
library(knitr)
library(kableExtra)
library(GGally)

# Path model on mtcars: mpg predicted by engine/drivetrain variables,
# with hp itself regressed on cyl, disp, and carb (hp acts as a mediator).
model <- '
mpg ~ hp + gear + cyl + disp + carb + am + wt
hp ~ cyl + disp + carb
'
fit.mod <- sem(model, data = mtcars)
# FIX: spell the argument out as `standardized` (the original `standardize`
# relied on fragile partial matching) and use TRUE, not the reassignable T.
summary(fit.mod, standardized = TRUE, rsquare = TRUE)
# Simulated eating-disorder data; raw data plus its summary statistics
# (covariance matrix and n) are both used to fit the same model below.
dat <- read.csv("http://commres.net/_media/r/eating.disorder.sim.csv")
head(dat)
n <- nrow(dat)
S <- cov(dat)
# FIX: name the correlation matrix `cor_mat` instead of `cor`, which
# masked base::cor() for the rest of the session.
cor_mat <- cor(dat)

# Model sketch (each endogenous variable gets its own disturbance term):
#   Self-Efficacy (DietSE) = BMI + Self-Esteem                   + disturbance
#   Bulimic Symptoms       = BMI + Self-Esteem + DietSE          + disturbance
#   Restrictive Symptoms   = BMI + Self-Esteem + DietSE          + disturbance
#   Overall Risk           = BMI + Self-Esteem + DietSE + Accu   + disturbance
ex1 <- " #opening a quote
# Tilda ~ : Regression
# M ~ X regression (X predicts M)
# Each line corresponds to an equation
# Disturbance is automatically included for each regression
# (i.e. no extra term needed)
DietSE ~ BMI + SelfEsteem #DietSE is predicted by BMI and SelfEsteem
Bulimia ~ DietSE + BMI + SelfEsteem
Restrictive ~ DietSE + BMI + SelfEsteem
Risk ~ DietSE + BMI + SelfEsteem + Accu
"
ex1.fit <- sem(ex1, data = dat)
# FIX: full argument name `standardized` and TRUE instead of T.
summary(ex1.fit, standardized = TRUE, rsquare = TRUE)

# Same model fit from summary statistics (covariance matrix + n) rather
# than raw data; estimates should match ex1.fit.
ex1b.fit <- sem(model = ex1, sample.cov = S, sample.nobs = n)
summary(ex1b.fit)
# Load packages: tidyverse for the pipe, lavaan for lav_matrix_lower2full().
library(tidyverse)
library(lavaan)

# Build a full 4 x 4 correlation matrix from its lower triangle.
# Variables: three predictors (X1-X3) and one outcome (Y).
cormat <- lav_matrix_lower2full(c(1,
                                  0.418613, 1,
                                  0.4153223, 0.2652651, 1,
                                  0.2670484, 0.4315334, 0.2709399, 1))
colnames(cormat) <- rownames(cormat) <- c("X1", "X2", "X3", "Y")
cormat

# Companion summary table: sample size (n = 254 for every variable) in the
# first row, variable means in the second.
# FIX: `<-` instead of `=` for assignment, TRUE instead of the reassignable T.
n_mean <- matrix(c(rep(254, 4), 4.066, 4.024, 4.033, 3.919),
                 ncol = 4, nrow = 2, byrow = TRUE)
n_mean

# Label rows/columns so the table is self-describing when printed.
n_mean <- n_mean %>%
  `colnames<-`(c("X1", "X2", "X3", "Y")) %>%
  `rownames<-`(c("n", "mean"))
n_mean
# Install the MASS package if not already installed
# install.packages("MASS")
# MASS provides mvrnorm() for multivariate-normal simulation.
library(MASS)
library(lavaan)

# 01 Data entry ----
# Variable meanings:
#   AC: individual's achievement of success
#   IN: household income
#   AB: individual's ability
#   SU: individual's passion for success

# 1. Define the mean vector (for the 4 variables above).
mns <- c(100, 120, 150, 130)

# 2. Define the covariance matrix (must be symmetric and positive-definite),
#    expanded here from its lower triangle into a full 4 x 4 matrix.
cv_mat <- lav_matrix_lower2full(
  c(26.5,
    21.5, 37.9,
    23.5, 25.2, 41.7,
    16.5, 14.6, 14.5, 15.8))
# BUG FIX: the original labelled a non-existent object `cvMat`;
# the matrix created above is `cv_mat`.
colnames(cv_mat) <- rownames(cv_mat) <- c("AC", "IN", "AB", "SU")

# 3. Simulate the data (300 observations) and round to whole numbers.
n_observations <- 300
sim_d <- mvrnorm(n = n_observations, mu = mns, Sigma = cv_mat)
sim_data <- round(sim_d, 0)

# View the first few rows and structure of the simulated data.
colnames(sim_data) <- c("AC", "IN", "AB", "SU")
head(sim_data)
str(sim_data)
# Path model with labelled regression paths:
#   SU regressed on AB (label a) and IN (label b);
#   AC regressed on SU (label c) and AB (label d);
#   AB ~~ IN lets the two exogenous predictors covary.
PathModel <- '
SU ~ a*AB +b*IN
AC ~ c*SU + d*AB
AB ~~ IN
'

# 03 Run the analysis with sem() ----
# sem(model, sample.cov = covariance matrix, sample.nobs = number of cases)
# Alternative: fit from the covariance matrix directly, e.g.
# fitM <- sem(PathModel, sample.cov = cv_mat, sample.nobs = 300,
#             fixed.x = FALSE)
fitM <- sem(PathModel, data = sim_data)

# 04 Inspect results ----
# FIX: rsquare = TRUE (not the reassignable T), consistent spacing.
summary(fitM, standardized = TRUE, fit.measures = TRUE, rsquare = TRUE)

# install.packages("semPlot")
library(semPlot)

# Path diagram with standardized estimates; rotation = 2 places
# exogenous variables on the left.
semPaths(fitM, whatLabels = "std", curveAdjacent = TRUE,
         style = "lisrel",
         intAtSide = TRUE, intercepts = TRUE, rotation = 2,
         layout = "tree3",
         edge.label.cex = 1,
         edge.label.position = 1, edge.color = "black",
         color = list(lat = "tomato", man = "grey"),
         curvature = 3,
         exoVar = TRUE, exoCov = TRUE)
# Bollen (1989) political-democracy data shipped with lavaan.
data(PoliticalDemocracy)
PoliticalDemocracy

# Classic three-factor SEM: industrialization in 1960 (ind60) and
# democracy in 1960/1965 (dem60, dem65), with correlated residuals
# among indicators measured at both occasions.
pd_model <- "
# measurement model
ind60 =~ x1 + x2 + x3
dem60 =~ y1 + y2 + y3 + y4
dem65 =~ y5 + y6 + y7 + y8
# regressions
dem60 ~ ind60
dem65 ~ ind60 + dem60
# residual correlations
y1 ~~ y5
y2 ~~ y4 + y6
y3 ~~ y7
y4 ~~ y8
y6 ~~ y8
"

# Estimate the model and show the full summary.
fit_pd <- sem(model = pd_model, data = PoliticalDemocracy)
summary(fit_pd, fit.measures = TRUE, standardized = TRUE)
# Packages for SEM fitting, path diagrams, and table/EDA helpers.
library(lavaan)
library(semPlot)
library(OpenMx)
library(tidyverse)
library(knitr)
library(kableExtra)
library(GGally)

# Path model on mtcars: mpg predicted by engine/drivetrain variables,
# with hp itself regressed on cyl, disp, and carb (hp acts as a mediator).
model <- '
mpg ~ hp + gear + cyl + disp + carb + am + wt
hp ~ cyl + disp + carb
'
fit.mod <- sem(model, data = mtcars)
# FIX: spell the argument out as `standardized` (the original `standardize`
# relied on fragile partial matching) and use TRUE, not the reassignable T.
summary(fit.mod, standardized = TRUE, rsquare = TRUE)
# Simulated eating-disorder data; raw data plus its summary statistics
# (covariance matrix and n) are both used to fit the same model below.
dat <- read.csv("http://commres.net/_media/r/eating.disorder.sim.csv")
head(dat)
n <- nrow(dat)
S <- cov(dat)
# FIX: name the correlation matrix `cor_mat` instead of `cor`, which
# masked base::cor() for the rest of the session.
cor_mat <- cor(dat)

# Model sketch (each endogenous variable gets its own disturbance term):
#   Self-Efficacy (DietSE) = BMI + Self-Esteem                   + disturbance
#   Bulimic Symptoms       = BMI + Self-Esteem + DietSE          + disturbance
#   Restrictive Symptoms   = BMI + Self-Esteem + DietSE          + disturbance
#   Overall Risk           = BMI + Self-Esteem + DietSE + Accu   + disturbance
ex1 <- " #opening a quote
# Tilda ~ : Regression
# M ~ X regression (X predicts M)
# Each line corresponds to an equation
# Disturbance is automatically included for each regression
# (i.e. no extra term needed)
DietSE ~ BMI + SelfEsteem #DietSE is predicted by BMI and SelfEsteem
Bulimia ~ DietSE + BMI + SelfEsteem
Restrictive ~ DietSE + BMI + SelfEsteem
Risk ~ DietSE + BMI + SelfEsteem + Accu
"
ex1.fit <- sem(ex1, data = dat)
# FIX: full argument name `standardized` and TRUE instead of T.
summary(ex1.fit, standardized = TRUE, rsquare = TRUE)

# Same model fit from summary statistics (covariance matrix + n) rather
# than raw data; estimates should match ex1.fit.
ex1b.fit <- sem(model = ex1, sample.cov = S, sample.nobs = n)
summary(ex1b.fit)
* sand box:code01
* *sand box:output01
{{clock}}
----
graph TD
A(**mermaid**)-->B((__plugin__))
A-->C(((//for//)))
B-->D[["[[https://www.dokuwiki.org/dokuwiki|Dokuwiki]]"]]
C-->D
\begin{eqnarray*}
& & P(A \mid B) = \dfrac{P(A \cap B)}{P(B)}\\
& & P(B \mid A) = \dfrac{P(B \cap A)}{P(A)}\\
\\
& & P(B \vert A) \;\; \text{ vs. } \;\; P(B \mid A) \\
& & P(A \cap B) = P(A \mid B) * P(B) \\
& & P(B \cap A) = P(B \mid A) * P(A) \\
& & P(A \cap B) = P(A, B) \\
\\
& & \frac{3}{4 \pi} \sqrt{4 \cdot x^2 \cdot 12} \\
& & \lim_{n \to \infty} \sum_{k=1}^n \frac{1}{k^2} = \frac{\pi^2}{6} \\
& & {\it f}(x) = \frac{1}{\sqrt{x} x^2} \\
& & e^{i \pi} + 1 = 0\;
\end{eqnarray*}
* [[:sand box/intro]]
* [[:sand box/body]]
* [[:sand box/conc]]
{{tabinclude>sand_box:page1|Top page,sand_box:page2|Second page,*sand_box:page3}}
* *sand_box:page1
* sand_box:page2
* sand_box:page3
[{{:r.regressionline3.png}}]
\begin{align*}
& \;\;\;\; \sum{(Y_i - \hat{Y_i})^2} \\
&= \sum{(Y_i - (a + bX_i))^2} \;\;\; \because \hat{Y_i} = a + bX_i \\
&= \text{SSE or SS.residual} \;\;\; \text{(and this should be the least value.)} \\
\end{align*}
\begin{align*}
&\text{for a (constant)} \\ \\
&\dfrac{\text{d}}{\text{dv}} \sum{(Y_i - (a + bX_i))^2} \\
&= \sum \dfrac{\text{d}}{\text{dv}} {(Y_i - (a + bX_i))^2} \\
&= \sum{2 (Y_i - (a + bX_i))} * (-1) \;\;\;\; \\
&\because \dfrac{\text{d}}{\text{dv for a}} (Y_i - (a+bX_i)) = -1 \\
& = -2 \sum{(Y_i - (a + bX_i))} \\
\\
&\text{in order to have the least value, the above should be zero} \\
\\
&-2 \sum{(Y_i - (a + bX_i))} = 0 \\
&\sum{(Y_i - (a + bX_i))} = 0 \\
&\sum{Y_i} - \sum{a} - b \sum{X_i} = 0 \\
&\sum{Y_i} - n*{a} - b \sum{X_i} = 0 \\
&n*{a} = \sum{Y_i} - b \sum{X_i} \\
&a = \dfrac{\sum{Y_i}}{n} - b \dfrac{\sum{X_i}}{n} \\
&a = \overline{Y} - b \overline{X} \\
\end{align*}
\begin{eqnarray*}
\text{for b, (coefficient)} \\
\\
\dfrac{\text{d}}{\text{dv}} \sum{(Y_i - (a + bX_i))^2} & = & \sum \dfrac{\text{d}}{\text{dv}} {(Y_i - (a + bX_i))^2} \\
& = & \sum{2 (Y_i - (a + bX_i))} * (-X_i) \;\;\;\; \\
& \because & \dfrac{\text{d}}{\text{dv for b}} (Y_i - (a+bX_i)) = -X_i \\
& = & -2 \sum{X_i (Y_i - (a + bX_i))} \\
\\
\text{in order to have the least value, the above should be zero} \\
\\
-2 \sum{X_i (Y_i - (a + bX_i))} & = & 0 \\
\sum{X_i (Y_i - (a + bX_i))} & = & 0 \\
\sum{X_i (Y_i - ((\overline{Y} - b \overline{X}) + bX_i))} & = & 0 \\
\sum{X_i ((Y_i - \overline{Y}) - b (X_i - \overline{X})) } & = & 0 \\
\sum{X_i (Y_i - \overline{Y})} - \sum{b X_i (X_i - \overline{X}) } & = & 0 \\
\sum{X_i (Y_i - \overline{Y})} & = & b \sum{X_i (X_i - \overline{X})} \\
b & = & \dfrac{\sum{X_i (Y_i - \overline{Y})}}{\sum{X_i (X_i - \overline{X})}} \\
& & \because \sum{\overline{X} (Y_i - \overline{Y})} = 0 \;\text{ and }\; \sum{\overline{X} (X_i - \overline{X})} = 0, \;\text{ so } X_i \text{ may be replaced by } (X_i - \overline{X}) \\
b & = & \dfrac{ \sum{(Y_i - \overline{Y})(X_i - \overline{X})} } {\sum{(X_i - \overline{X})(X_i - \overline{X})}} \\
b & = & \dfrac{ \text{SP} } {\text{SS}_\text{x}} \\
\end{eqnarray*}