diff --git a/DESCRIPTION b/DESCRIPTION
index 97d7df5c..63a9386d 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,7 +1,7 @@
Package: migraph
Title: Multimodal Network Analysis and More
-Version: 1.1.1
-Date: 2023-10-11
+Version: 1.1.2
+Date: 2023-10-18
Description: A set of tools for analysing multimodal networks.
  It includes functions for measuring
  centrality, centralization, cohesion, closure, constraint and diversity,
@@ -12,7 +12,7 @@ Description: A set of tools for analysing multimodal networks.
  Built on the 'manynet' package, all functions operate with matrices,
  edge lists, and 'igraph', 'network', and 'tidygraph' objects,
  and on one-mode, two-mode (bipartite), and sometimes three-mode networks.
-URL: https://github.com/snlab-ch/migraph
+URL: https://snlab-ch.github.io/migraph/
BugReports: https://github.com/snlab-ch/migraph/issues
License: MIT + file LICENSE
Language: en-GB
diff --git a/NEWS.md b/NEWS.md
index aed8b28d..eb9c0f1e 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -1,3 +1,17 @@
+# migraph 1.1.2
+
+2023-10-18
+
+## Package
+
+- Added more code annotations in 'tutorial4' (Community)
+- Elaborated 'tutorial5' (was named Equivalence, now Position)
+- Incremental chunks in tutorials are now hidden upon extraction using the `purl = FALSE` chunk option
+
+## Members
+
+- Improved printing of `node_member` and `node_measure` objects
+
# migraph 1.1.1

2023-10-11
diff --git a/R/class_measures.R b/R/class_measures.R
index 44a2e61d..ca4c65d8 100644
--- a/R/class_measures.R
+++ b/R/class_measures.R
@@ -1,4 +1,5 @@
make_node_measure <- function(out, .data) {
+  if(manynet::is_labelled(.data)) names(out) <- manynet::node_names(.data)
  class(out) <- c("node_measure", class(out))
  attr(out, "mode") <- manynet::node_mode(.data)
  out
@@ -154,6 +155,6 @@ print_tblvec <- function(y, names){
    print(body)
    cat(pillar::style_subtle(paste("# ... with", setup$extra_cols_total,
-        "more from this nodeset in the vector.")))
+        "more values from this nodeset not printed but in the vector.")))
  } else print(body)
}
diff --git a/R/class_members.R b/R/class_members.R
index a325fa4f..870b10da 100644
--- a/R/class_members.R
+++ b/R/class_members.R
@@ -1,4 +1,5 @@
make_node_member <- function(out, .data) {
+  if(manynet::is_labelled(.data)) names(out) <- manynet::node_names(.data)
  class(out) <- c("node_member", class(out))
  attr(out, "mode") <- manynet::node_mode(.data)
  out
@@ -24,13 +25,14 @@ print.node_member <- function(x, ...,
    }
  } else {
    for (i in names(table(x))) {
-      if (i == names(table(x))[1]) cat(i, "\n")
-      else cat("\n", i, "\n")
+      if (i == names(table(x))[1]) cat("Class ", i, ":", sep = "")
+      else cat("Class ", i, ":", sep = "")
      if (!is.null(names(x)))
        y <- paste(names(x[x == i]), collapse = ", ")
      else
        y <- paste(which(x == i), collapse = ", ")
-      cat(" ", y)
+      cat(" ", y)
+      if (i != names(table(x))[length(table(x))]) cat("\n")
    }
  }
}
diff --git a/R/member_community.R b/R/member_community.R
index a83403cb..8ce1d4b7 100644
--- a/R/member_community.R
+++ b/R/member_community.R
@@ -65,7 +65,6 @@ node_kernighanlin <- function(.data){
  # extract names of vertices in each group after swaps
  out <- ifelse(manynet::node_names(.data) %in% g1.newnames, 1, 2)
-  if(manynet::is_labelled(.data)) names(out) <- manynet::node_names(.data)
  make_node_member(out, .data)
}
diff --git a/inst/tutorials/tutorial3/centrality.Rmd b/inst/tutorials/tutorial3/centrality.Rmd
index 511489ac..ee7edcbe 100644
--- a/inst/tutorials/tutorial3/centrality.Rmd
+++ b/inst/tutorials/tutorial3/centrality.Rmd
@@ -26,24 +26,26 @@ We can create a two-mode version of the dataset by renaming the nodal attribute
"twomode_type" to just "type".
Let's begin by graphing these datasets using `manynet::autographr()`.

-```{r coercion, exercise = TRUE}
+```{r coercion, exercise = TRUE, purl = FALSE}

```

-```{r coercion-hint-1}
+```{r coercion-hint-1, purl = FALSE}
# Let's graph the one-mode version
autographr(____)
```

-```{r coercion-hint-2}
+```{r coercion-hint-2, purl = FALSE}
# Now, let's create a two-mode version 'ison_brandes2' and graph it.
ison_brandes2 <- ison_brandes %>% rename(type = twomode_type)
autographr(____)
```

-```{r coercion-solution}
+```{r coercion-solution, purl = FALSE}
+# plot the one-mode version
autographr(ison_brandes)
ison_brandes2 <- ison_brandes %>% rename(type = twomode_type)
+# plot the two-mode version
autographr(ison_brandes2)
```

@@ -52,17 +54,18 @@ even if it's just pretend.
Luckily, `{manynet}` has a function for this.
This makes plotting the network just a wee bit more
accessible and interpretable:

-```{r addingnames, exercise = TRUE}
+```{r addingnames, exercise = TRUE, purl = FALSE}
ison_brandes <- to_named(ison_brandes)
```

-```{r addingnames-hint-1}
+```{r addingnames-hint-1, purl = FALSE}
# Now, let's graph using the object names: "ison_brandes"
autographr(____)
```

```{r addingnames-solution}
ison_brandes <- to_named(ison_brandes)
+# plot network with names
autographr(ison_brandes)
```

@@ -74,23 +77,24 @@ as they are assigned randomly from a pool of (American) first names.
Let's start with calculating degree, as it is easy to calculate yourself.
Just sum the rows or columns of the matrix!
-```{r degreesum, exercise = TRUE, exercise.setup = "addingnames"}
+```{r degreesum, exercise = TRUE, exercise.setup = "addingnames", purl = FALSE}

```

-```{r degreesum-hint-1}
+```{r degreesum-hint-1, purl = FALSE}
# We can calculate degree centrality like this:
(mat <- as_matrix(ison_brandes))
(degrees <- rowSums(mat))
rowSums(mat) == colSums(mat)
```

-```{r degreesum-hint-2}
+```{r degreesum-hint-2, purl = FALSE}
# Or by using a built-in command in migraph like this:
node_degree(ison_brandes, normalized = FALSE)
```

```{r degreesum-solution}
+# manually calculate degree centrality
mat <- as_matrix(ison_brandes)
degrees <- rowSums(mat)
rowSums(mat) == colSums(mat)
@@ -98,7 +102,7 @@ rowSums(mat) == colSums(mat)
node_degree(ison_brandes, normalized = FALSE)
```

-```{r degreesum-Q, echo=FALSE}
+```{r degreesum-Q, echo=FALSE, purl = FALSE}
question("Are the row sums the same as the column sums?",
         answer("Yes",
                correct = TRUE,
@@ -112,11 +116,12 @@ Often we are interested in the distribution of (degree) centrality
in a network.
`{migraph}` offers a way to get a pretty good first look
at this distribution, though there are more elaborate ways
to do this in base and grid graphics.

-```{r distrib, exercise = TRUE, exercise.setup = "addingnames"}
+```{r distrib, exercise = TRUE, exercise.setup = "addingnames", purl = FALSE}

```

```{r distrib-solution}
+# distribution of degree centrality scores of nodes
plot(node_degree(ison_brandes))
```

@@ -131,23 +136,23 @@ Fortunately, we can use functions from `{migraph}`
to help calculate the betweenness, closeness, and eigenvector centralities
for each node in the network.
Let's collect the vectors of these centralities for the `ison_brandes` dataset:

-```{r micent, exercise = TRUE, exercise.setup = "addingnames"}
+```{r micent, exercise = TRUE, exercise.setup = "addingnames", purl = FALSE}

```

-```{r micent-hint-1}
+```{r micent-hint-1, purl = FALSE}
# Use the node_betweenness() function to calculate the
# betweenness centralities of nodes in a network
node_betweenness(ison_brandes)
```

-```{r micent-hint-2}
+```{r micent-hint-2, purl = FALSE}
# Use the node_closeness() function to calculate the
# closeness centrality of nodes in a network
node_closeness(ison_brandes)
```

-```{r micent-hint-3}
+```{r micent-hint-3, purl = FALSE}
# Use the node_eigenvector() function to calculate
# the eigenvector centrality of nodes in a network
node_eigenvector(ison_brandes)
@@ -186,11 +191,12 @@ e.g. `node_is_max()` or `tie_is_min()`.
By passing this attribute to the `autographr()` argument "node_color"
we can highlight which node or nodes hold the maximum score in red.

-```{r ggid, exercise = TRUE, exercise.setup = "addingnames"}
+```{r ggid, exercise = TRUE, exercise.setup = "addingnames", purl = FALSE}

```

```{r ggid-solution}
+# plot the network, highlighting the node with the highest centrality score with a different colour
ison_brandes %>%
  add_node_attribute("color", node_is_max(node_degree(ison_brandes))) %>%
  autographr(node_color = "color")
@@ -211,7 +217,7 @@ ison_brandes %>%
How neat! Try it with the two-mode version. What can you see?
-```{r ggid_twomode, exercise = TRUE}
+```{r ggid_twomode, exercise = TRUE, purl = FALSE}
# Instead of "ison_brandes", use "ison_brandes2"

```

@@ -234,7 +240,7 @@ ison_brandes2 %>%
  autographr(node_color = "color")
```

-```{r brandes2quiz}
+```{r brandes2quiz, purl = FALSE}
question("Select all that are true for the two-mode Brandes network.",
         answer("Only one node is selected in each plot."),
         answer("The maximum degree square has a higher degree than the maximum degree circle(s).",
@@ -251,7 +257,7 @@ Here we are no longer interested in the level of the node,
but in the level of the whole network,
so the syntax replaces `node_` with `network_`:

-```{r centzn, exercise = TRUE, exercise.setup = "addingnames"}
+```{r centzn, exercise = TRUE, exercise.setup = "addingnames", purl = FALSE}

```

@@ -277,7 +283,7 @@ What if we want to have a single image/figure with multiple plots?
This can be a little tricky with gg-based plots,
but fortunately the `{patchwork}` package is here to help.

-```{r multiplot, exercise = TRUE, exercise.setup = "addingnames"}
+```{r multiplot, exercise = TRUE, exercise.setup = "addingnames", purl = FALSE}

```

@@ -304,7 +310,7 @@ ge <- autographr(ison_brandes, node_color = "eigenvector") +
```

-```{r centzdq}
+```{r centzdq, purl = FALSE}
question("How centralized is the ison_brandes network? Select all that apply.",
         answer("It is more degree centralised than betweenness centralised.",
                message = "Degree centralisation is at 0.18 for this network whereas betweenness centralisation is at 0.32. In other words, the network is better characterised as having 1 or 2 nodes lying on the shortest paths between others than one where 1 or 2 nodes have many more ties than the others."),
@@ -317,7 +323,7 @@ question("How centralized is the ison_brandes network? Select all that apply.",
         allow_retry = TRUE)
```

-```{r centvcent, echo=FALSE}
+```{r centvcent, echo=FALSE, purl = FALSE}
question("What is the difference between centrality and centralisation according to the literature?",
         answer("Centrality is for nodes and centralisation is for networks",
                correct = TRUE),
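The chunk-option change running through this tutorial is `purl = FALSE`. As a hedged aside (not part of the patch; the output file name is illustrative), this is the knitr behaviour the NEWS entry relies on:

```r
# Chunks marked purl = FALSE (hints, quizzes, and empty exercise scaffolds)
# are skipped when knitr extracts the R code from a tutorial.
library(knitr)
purl("inst/tutorials/tutorial3/centrality.Rmd", output = "centrality.R")
# centrality.R now contains only the chunks without purl = FALSE
```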
diff --git a/inst/tutorials/tutorial3/centrality.html b/inst/tutorials/tutorial3/centrality.html
index 7da156d4..a1df2979 100644
--- a/inst/tutorials/tutorial3/centrality.html
+++ b/inst/tutorials/tutorial3/centrality.html
[Regenerated learnr output for tutorial3. The HTML changes mirror the centrality.Rmd edits above: retitled sections ("Calculating centrality", "Degree centrality", "Other centralities", "Plotting centrality", "Calculating centralization"), the new code annotations and `purl = FALSE` chunk metadata, new reflection questions on degree, closeness, betweenness, and Bonacich's distinction between power and influence, a note that unrounded values are retained internally even when rounded values are printed, and an `ison_brandes2` definition added to the setup chunk.]
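Before the next tutorial's diff, a rough sketch of the improved `node_member` printing introduced in R/class_members.R above. The dataset and the exact split shown are illustrative assumptions, not taken from the patch:

```r
library(migraph)
# node_kernighanlin() returns a node_member object; because
# make_node_member() now attaches node names for labelled networks,
# each class prints as a named group rather than as bare integers:
node_kernighanlin(manynet::ison_adolescents)
# Class 1: Betty, Sue, ...
# Class 2: Alice, Jane, ...
```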
diff --git a/inst/tutorials/tutorial4/community.Rmd b/inst/tutorials/tutorial4/community.Rmd
index a29a95b1..289c5ad7 100644
--- a/inst/tutorials/tutorial4/community.Rmd
+++ b/inst/tutorials/tutorial4/community.Rmd
@@ -22,18 +22,18 @@ The data we're going to use here, "ison_algebra", is included in the `{manynet}`
Do you remember how to call the data?
Can you find out some more information about it?

-```{r data, exercise = TRUE}
+```{r data, exercise = TRUE, purl = FALSE}

```

-```{r data-hint-1}
+```{r data-hint-1, purl = FALSE}
# Let's call and load the 'ison_algebra' dataset
data("ison_algebra", package = "manynet")
# Or you can retrieve like this:
ison_algebra <- manynet::ison_algebra
```

-```{r data-hint-2}
+```{r data-hint-2, purl = FALSE}
# If you want to learn more about the 'ison_algebra' dataset, use the following function (below)
?manynet::ison_algebra
```
@@ -41,11 +41,15 @@ ison_algebra <- manynet::ison_algebra
```{r data-solution}
data("ison_algebra", package = "manynet")
?manynet::ison_algebra
+# If you want to see the network object, you can run the name of the object
+ison_algebra
+# or wrap the code in brackets to assign and print in one step
+(ison_algebra <- manynet::ison_algebra)
```

-This dataset is multiplex, meaning that it contains
-several different types of ties:
-friendship, social and task interactions.
+We can see after printing the object that the dataset is multiplex,
+meaning that it contains several different types of ties:
+friendship (friends), social (social) and task interactions (tasks).

### Adding names

@@ -55,15 +59,15 @@ Luckily, `{manynet}` has a function for this, `to_named()`.
This makes plotting the network just a wee bit more
accessible and interpretable.
Let's try adding names and graphing the network now:

-```{r addingnames, exercise=TRUE, exercise.setup = "data"}
+```{r addingnames, exercise=TRUE, exercise.setup = "data", purl = FALSE}

```

-```{r addingnames-hint-1}
+```{r addingnames-hint-1, purl = FALSE}
ison_algebra <- to_named(ison_algebra)
```

-```{r addingnames-hint-2}
+```{r addingnames-hint-2, purl = FALSE}
autographr(ison_algebra)
```

@@ -78,33 +82,35 @@ as they are assigned randomly from a pool of (American) first names.

### Separating multiplex networks

As a multiplex network,
-there are actually three different types of tie in this network.
+this network actually contains three different types of ties
+(friends, social, and tasks).
We can extract them and graph them separately using `to_uniplex()`:

-```{r separatingnets, exercise=TRUE, exercise.setup = "data"}
+```{r separatingnets, exercise=TRUE, exercise.setup = "data", purl = FALSE}

```

-```{r separatingnets-hint-1}
-# let's focus on the 'friends' attribute
+```{r separatingnets-hint-1, purl = FALSE}
+# to_uniplex extracts ties of a single type,
+# focusing on the 'friends' tie attribute here
friends <- to_uniplex(ison_algebra, "friends")
gfriend <- autographr(friends) + ggtitle("Friendship")
```

-```{r separatingnets-hint-2}
-# let's focus on the 'social' attribute
+```{r separatingnets-hint-2, purl = FALSE}
+# now let's focus on the 'social' tie attribute
social <- to_uniplex(ison_algebra, "social")
gsocial <- autographr(social) + ggtitle("Social")
```

-```{r separatingnets-hint-3}
-# let's focus on the 'tasks' attribute
+```{r separatingnets-hint-3, purl = FALSE}
+# and the 'tasks' tie attribute
tasks <- to_uniplex(ison_algebra, "tasks")
gtask <- autographr(tasks) + ggtitle("Task")
```

-```{r separatingnets-hint-4}
-# now, let's compare the each attribute's graph, side-by-side
+```{r separatingnets-hint-4, purl = FALSE}
+# now, let's compare each attribute's graph, side-by-side
gfriend + gsocial + gtask
# if you get an error here, you may need to install and load
# the package 'patchwork'.
@@ -122,6 +128,7 @@ gsocial <- autographr(social) + ggtitle("Social")
tasks <- to_uniplex(ison_algebra, "tasks")
gtask <- autographr(tasks) + ggtitle("Task")
+# We now have three separate networks depicting each type of tie from the ison_algebra network:
gfriend + gsocial + gtask
```

@@ -141,17 +148,18 @@ density, reciprocity, transitivity, and components.

Because this is a directed network, we can calculate the density as:

-```{r dens-explicit, exercise=TRUE, exercise.setup = "separatingnets"}
+```{r dens-explicit, exercise=TRUE, exercise.setup = "separatingnets", purl = FALSE}

```

```{r dens-explicit-solution}
+# calculating network density manually according to equation
network_ties(tasks)/(network_nodes(tasks)*(network_nodes(tasks)-1))
```

but we can also just use the `{migraph}` function...

-```{r dens, exercise=TRUE, exercise.setup = "separatingnets"}
+```{r dens, exercise=TRUE, exercise.setup = "separatingnets", purl = FALSE}

```

@@ -163,7 +171,7 @@ Note that the various measures in `{migraph}` print results to three decimal poi
by default, but the underlying result retains the same recurrence.
So same result...

-```{r dens-qa, echo=FALSE}
+```{r dens-qa, echo=FALSE, purl = FALSE}
question("Is this network's density high or low?",
         answer("High",
                message = "The closer the value is to 1, the more dense the network and the more cohesive the network is as a whole."),
@@ -180,30 +188,32 @@ While one could do this by hand,
it's more efficient to do this using the `{migraph}` package.
Can you guess the correct name of the function?

-```{r recip, exercise=TRUE, exercise.setup = "separatingnets"}
+```{r recip, exercise=TRUE, exercise.setup = "separatingnets", purl = FALSE}

```

```{r recip-solution}
network_reciprocity(tasks)
+# this function calculates the amount of reciprocity in the whole network
```

And let's calculate _transitivity_ in the task network.
Again, can you guess the correct name of this function?
-```{r trans, exercise=TRUE, exercise.setup = "separatingnets"}
+```{r trans, exercise=TRUE, exercise.setup = "separatingnets", purl = FALSE}

```

```{r trans-solution}
network_transitivity(tasks)
+# this function calculates the amount of transitivity in the whole network
```

We have collected measures of the task network's reciprocity and transitivity,
but we still need to interpret these measures.
These measures do not speak for themselves.

-```{r trans-interp, echo=FALSE}
+```{r trans-interp, echo=FALSE, purl = FALSE}
question("What can we say about task closure in this network? Choose all that apply.",
         answer("Transitivity for the task network is 0.568",
                correct = TRUE),
@@ -229,7 +239,7 @@ return the number of _strong_ components for directed networks.
For _weak_ components, you will need to first make the network undirected.
Remember the difference between weak and strong components?

-```{r weak-strong, echo = FALSE}
+```{r weak-strong, echo = FALSE, purl = FALSE}
question("Weak components...",
         answer("don't care about tie direction when establishing components.",
                correct = TRUE),
@@ -238,15 +248,18 @@ question("Weak components...",
)
```

-```{r comp-no, exercise=TRUE, exercise.setup = "separatingnets"}
+```{r comp-no, exercise=TRUE, exercise.setup = "separatingnets", purl = FALSE}

```

-```{r comp-no-hint-1}
+```{r comp-no-hint-1, purl = FALSE}
network_components(friends)
+# note that friends is a directed network
+# you can see this by calling the object 'friends'
+# or by running `manynet::is_directed(friends)`
```

-```{r comp-no-hint-2}
+```{r comp-no-hint-2, purl = FALSE}
# Now let's look at the number of components for objects connected by an undirected edge
# Note: to_undirected() returns an object with all tie direction removed,
# so any pair of nodes with at least one directed edge
# will be connected by an undirected edge in the new network.
network_components(to_undirected(friends))
```

```{r comp-no-solution}
+# note that friends is a directed network
network_components(friends)
network_components(to_undirected(friends))
```

-```{r comp-interp, echo = FALSE}
+```{r comp-interp, echo = FALSE, purl = FALSE}
question("How many components are there?",
         answer("2",
                message = "There are more than 2 components."),
@@ -280,19 +294,25 @@ but maybe we're also interested in which nodes are members of which components?
`node_components()` returns a membership vector that can be used
to color nodes in `autographr()`:

-```{r comp-memb, exercise=TRUE, exercise.setup = "separatingnets"}
+```{r comp-memb, exercise=TRUE, exercise.setup = "separatingnets", purl = FALSE}

```

-```{r comp-memb-hint-1}
+```{r comp-memb-hint-1, purl = FALSE}
friends <- friends %>%
  mutate(weak_comp = node_components(to_undirected(friends)),
         strong_comp = node_components(friends))
+# node_components returns a vector of nodes' memberships to components in the network
+# here, we are adding the nodes' membership to components as an attribute in the network
+# alternatively, we can also use the function `add_node_attribute()`
+# e.g. `add_node_attribute(friends, "weak_comp", node_components(to_undirected(friends)))`
```

-```{r comp-memb-hint-2}
+```{r comp-memb-hint-2, purl = FALSE}
autographr(friends, node_color = "weak_comp") + ggtitle("Weak components") +
autographr(friends, node_color = "strong_comp") + ggtitle("Strong components")
+# by using the 'node_color' argument, we are telling autographr to colour
+# the nodes in the graph according to the values of the 'weak_comp' attribute in the network
```

```{r comp-memb-solution}
@@ -303,7 +323,7 @@
autographr(friends, node_color = "weak_comp") + ggtitle("Weak components") +
autographr(friends, node_color = "strong_comp") + ggtitle("Strong components")
```

-```{r node-comp-interp, echo = FALSE}
+```{r node-comp-interp, echo = FALSE, purl = FALSE}
question("Why is there a difference between the weak and strong components results?",
         answer("Because one node has only incoming ties.",
                correct = TRUE),
@@ -327,20 +347,20 @@ we will concentrate on the main component (the so-called 'giant' component)
and consider friendship undirected.
Can you guess how to make these changes to the 'friends' network?

-```{r manip-fri, exercise=TRUE, exercise.setup = "separatingnets"}
+```{r manip-fri, exercise=TRUE, exercise.setup = "separatingnets", purl = FALSE}

```

-```{r manip-fri-hint-1}
+```{r manip-fri-hint-1, purl = FALSE}
# to_giant() returns an object that includes only the main component without any smaller components or isolates
(friends <- to_giant(friends))
```

-```{r manip-fri-hint-2}
+```{r manip-fri-hint-2, purl = FALSE}
(friends <- to_undirected(friends))
```

-```{r manip-fri-hint-3}
+```{r manip-fri-hint-3, purl = FALSE}
# now, let's graph the new network
autographr(friends)
```
@@ -396,11 +416,11 @@ computationally-prohibitive exhaustive enumeration (Brandes et al. 2008))."

So let's try and get a community classification using the walktrap algorithm
with path lengths of the random walks specified to be 50.

-```{r walk, exercise=TRUE, exercise.setup = "separatingnets"}
+```{r walk, exercise=TRUE, exercise.setup = "separatingnets", purl = FALSE}

```

-```{r walk-hint-1}
+```{r walk-hint-1, purl = FALSE}
# let's use the node_walktrap() function to create a hierarchical,
# agglomerative algorithm based on random walks, and assign it to
# an object
friend_wt <- node_walktrap(friends, times=50)
friend_wt # note that it prints pretty, but underlying it's just a vector:
```

-```{r walk-hint-2}
+```{r walk-hint-2, purl = FALSE}
c(friend_wt)

# This says that dividing the graph into 2 communities maximises modularity,
which(friend_wt == 1)
which(friend_wt == 2)
```

-```{r walk-hint-3}
+```{r walk-hint-3, purl = FALSE}
# resulting in a modularity of
network_modularity(friends, friend_wt)
```
@@ -441,10 +461,10 @@ network_modularity(friends, friend_wt)

We can also visualise the clusters on the original network.
How does the following look? Plausible?
-```{r walkplot, exercise=TRUE, exercise.setup = "walk"}
+```{r walkplot, exercise=TRUE, exercise.setup = "walk", purl = FALSE}

```

-```{r walkplot-hint-1}
+```{r walkplot-hint-1, purl = FALSE}
# plot 1: groups by node color

friends <- friends %>%
@@ -452,14 +472,14 @@ friends <- friends %>%
autographr(friends, node_color = "walk_comm")
```

-```{r walkplot-hint-2}
+```{r walkplot-hint-2, purl = FALSE}
# plot 2: groups by borders

-# to be fancy, we could even draw the group borders around the nodes
+# to be fancy, we could even draw the group borders around the nodes using the node_group argument
autographr(friends, node_group = "walk_comm")
```

-```{r walkplot-hint-3}
+```{r walkplot-hint-3, purl = FALSE}
# plot 3: group and node colors

# or both!
@@ -468,13 +488,16 @@ autographr(friends,
           node_group = "walk_comm") +
  ggtitle("Walktrap",
          subtitle = round(network_modularity(friends, friend_wt), 3))
+# the function `round()` rounds the values to a specified number of decimal places
+# here, we are telling it to round the network_modularity score to 3 decimal places,
+# but the score is exactly 0.27 so only two decimal places are printed.
```

```{r walkplot-solution}
friends <- friends %>%
  mutate(walk_comm = friend_wt)
autographr(friends, node_color = "walk_comm")
-# to be fancy, we could even draw the group borders around the nodes
+# to be fancy, we could even draw the group borders around the nodes using the node_group argument
autographr(friends, node_group = "walk_comm")
# or both!
autographr(friends,
@@ -501,7 +524,7 @@ we will get a hierarchical map (dendrogram) of the communities in the graph.
The following works similarly to walktrap, but no need to set a step length.

-```{r eb, exercise=TRUE, exercise.setup = "separatingnets"}
+```{r eb, exercise=TRUE, exercise.setup = "separatingnets", purl = FALSE}

```

```{r eb-solution}
@@ -516,18 +539,18 @@ here: http://jfaganuk.github.io/2015/01/24/basic-network-analysis/

To visualise the result:

-```{r ebplot, exercise=TRUE, exercise.setup = "eb"}
+```{r ebplot, exercise=TRUE, exercise.setup = "eb", purl = FALSE}

```

-```{r ebplot-hint-1}
+```{r ebplot-hint-1, purl = FALSE}
# create an object

friends <- friends %>%
  mutate(eb_comm = friend_eb)
```

-```{r ebplot-hint-2}
+```{r ebplot-hint-2, purl = FALSE}
# create a graph with a title and subtitle returning the modularity score

autographr(friends,
@@ -562,17 +585,17 @@ This is very fast, but has the disadvantage of being a greedy algorithm,
so it might not produce the best overall community partitioning,
although I personally find it both useful and in many cases quite "accurate".

-```{r fg, exercise=TRUE, exercise.setup = "separatingnets"}
+```{r fg, exercise=TRUE, exercise.setup = "separatingnets", purl = FALSE}

```

-```{r fg-hint-1}
+```{r fg-hint-1, purl = FALSE}
friend_fg <- node_fast_greedy(friends)
friend_fg # Does this result in a different community partition?
network_modularity(friends, friend_fg) # Compare this to the edge betweenness procedure
```

-```{r fg-hint-2}
+```{r fg-hint-2, purl = FALSE}
# Again, we can visualise these communities in different ways:
friends <- friends %>%
  mutate(fg_comm = friend_fg)
@@ -581,6 +604,7 @@ autographr(friends,
           node_group = "fg_comm") +
  ggtitle("Fast-greedy",
          subtitle = round(network_modularity(friends, friend_fg), 3))
+#
```

```{r fg-solution}
@@ -602,7 +626,7 @@ See A Clauset, MEJ Newman, C Moore:
Finding community structure in very large networks,
https://arxiv.org/abs/cond-mat/0408187

-```{r comm-comp, echo=FALSE}
+```{r comm-comp, echo=FALSE, purl = FALSE}
question("What is the difference between communities and components?",
         answer("Communities and components are just different terms for the same thing"),
         answer("Communities are a stricter form of component"),
@@ -616,17 +640,17 @@ question("What is the difference between communities and components?",

The next dataset, 'ison_southern_women', is also available in `{manynet}`.
Let's load and graph the data.

-```{r setup-women, exercise=TRUE, exercise.setup = "data"}
+```{r setup-women, exercise=TRUE, exercise.setup = "data", purl = FALSE}

```

-```{r setup-women-hint-1}
+```{r setup-women-hint-1, purl = FALSE}
# let's load the data and analyze it
data("ison_southern_women")
ison_southern_women
```

-```{r setup-women-hint-2}
+```{r setup-women-hint-2, purl = FALSE}
autographr(ison_southern_women, node_color = "type")
autographr(ison_southern_women, "railway", node_color = "type")
```
@@ -644,7 +668,7 @@ For that, we can obtain a 'projection' of the two-mode network.
There are two ways of doing this.
The hard way...

-```{r hardway, exercise=TRUE, exercise.setup = "setup-women"}
+```{r hardway, exercise=TRUE, exercise.setup = "setup-women", purl = FALSE}

```

@@ -656,11 +680,11 @@ event_matrix <- t(twomode_matrix) %*% twomode_matrix

Or the easy way:

-```{r easyway, exercise=TRUE, exercise.setup = "setup-women"}
+```{r easyway, exercise=TRUE, exercise.setup = "setup-women", purl = FALSE}

```

-```{r easyway-hint-1}
+```{r easyway-hint-1, purl = FALSE}
# women-graph
# to_mode1(): Results in a weighted one-mode object that retains the row nodes from
# a two-mode object, and weights the ties between them on the basis of their joint
# ties to nodes in the second mode (columns)
women_graph <- to_mode1(ison_southern_women)
autographr(women_graph)
+
+# note that projection `to_mode1` involves keeping one type of node
+# this is different from to_uniplex above, which keeps one type of tie in the network
```

-```{r easyway-hint-2}
+```{r easyway-hint-2, purl = FALSE}
# event-graph
# to_mode2(): Results in a weighted one-mode object that retains the column nodes from
# a two-mode object, and weights the ties between them on the basis of their joint ties
@@ -690,7 +717,7 @@ autographr(event_graph)

`{manynet}` also includes several other options for how to construct the projection.
Please see the help file for more details.

-```{r otherway, exercise=TRUE, exercise.setup = "setup-women"}
+```{r otherway, exercise=TRUE, exercise.setup = "setup-women", purl = FALSE}

```

@@ -704,17 +731,17 @@ autographr(to_mode2(ison_southern_women, similarity = "yule")) + ggtitle("Yule's

Which women/events 'bind' which events/women?
Let's return to the question of cohesion.
-```{r twomode-cohesion, exercise=TRUE, exercise.setup = "setup-women"}
+```{r twomode-cohesion, exercise=TRUE, exercise.setup = "setup-women", purl = FALSE}

```

-```{r twomode-cohesion-hint-1}
+```{r twomode-cohesion-hint-1, purl = FALSE}
# network_equivalency(): Calculate equivalence or reinforcement in a (usually two-mode) network
network_equivalency(ison_southern_women)
```

-```{r twomode-cohesion-hint-2}
+```{r twomode-cohesion-hint-2, purl = FALSE}
# network_transitivity(): Calculate transitivity in a network
network_transitivity(women_graph)
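To make the relationship between the 'hard way' and the 'easy way' concrete, here is a quick consistency check. It is a sketch only: it assumes `to_mode1()`'s default count weighting and that the projection drops self-ties, so only off-diagonal entries are compared:

```r
library(manynet)
# project the two-mode network manually...
twomode_matrix <- as_matrix(ison_southern_women)
women_matrix <- twomode_matrix %*% t(twomode_matrix)
# ...and with to_mode1(), then compare the off-diagonal entries
off <- upper.tri(women_matrix)
all(women_matrix[off] == as_matrix(to_mode1(ison_southern_women))[off])
```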

Setting up

-

The data we’re going to use here is included in the -{manynet} package. This dataset is multiplex, meaning that -it contains several different types of ties: friendship, social and task -interactions.

+

The data we’re going to use here, “ison_algebra”, is included in the +{manynet} package. Do you remember how to call the data? +Can you find out some more information about it?

+data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
-
# first, let's load the manynet and migraph packages
-library(manynet)
-library(migraph)
+data-lines="0" data-pipe="|>"> +
# Let's call and load the 'ison_algebra' dataset
+data("ison_algebra", package = "manynet")
+# Or you can retrieve like this:
+ison_algebra <- manynet::ison_algebra
-
# next, let's call and load the 'ison_algebra' dataset
-data("ison_algebra", package = "manynet")
-
-# if you want to learn more about the 'ison_algebra' dataset, use the following function (below)
+data-lines="0" data-pipe="|>">
+
# If you want to learn more about the 'ison_algebra' dataset, use the following function (below)
 ?manynet::ison_algebra
-
library(manynet)
-data("ison_algebra", package = "manynet")
-
-

Note that you do not need to load the package using -library() to get the data. Now you know how to create new -matrices in R, load .csv files, saved .RData files, and data from -packages!

+data-lines="0" data-pipe="|>"> +
data("ison_algebra", package = "manynet")
+?manynet::ison_algebra
+# If you want to see the network object, you can run the name of the object
+ison_algebra
+# or print the code with brackets at the front and end of the code
+(ison_algebra <- manynet::ison_algebra)
+
+

We can see after printing the object that the dataset is multiplex, +meaning that it contains several different types of ties: friendship +(friends), social (social) and task interactions (tasks).

Adding names

-

The network is anonymous, but I think it would be nice to add some -names, even if it’s just pretend. Luckily, {migraph} has a -function for this. This makes plotting the network just a wee bit more -accessible and interpretable:

+

The network is also anonymous, but I think it would be nice to add +some names, even if it’s just pretend. Luckily, {manynet} +has a function for this, to_named(). This makes plotting +the network just a wee bit more accessible and interpretable. Let’s try +adding names and graphing the network now:

+data-lines="0" data-pipe="|>">
+data-lines="0" data-pipe="|>">
ison_algebra <- to_named(ison_algebra)
+data-lines="0" data-pipe="|>">
autographr(ison_algebra)
+data-lines="0" data-pipe="|>">
ison_algebra <- to_named(ison_algebra)
 autographr(ison_algebra)
@@ -179,43 +181,53 @@

Adding names

Separating multiplex networks

As a multiplex network, there are actually three different types of -tie in this network. We can extract them and investigate them separately -using to_uniplex():

+ties (friends, social, and tasks) in this network. We can extract them +and graph them separately using to_uniplex():

+data-lines="0" data-pipe="|>">
-
# let's focus on the 'friends' attribute
+data-diagnostics="1" data-startover="1" data-lines="0"
+data-pipe="|>">
+
# to_uniplex extracts ties of a single type,
+# focusing on the 'friends' tie attribute here
 friends <- to_uniplex(ison_algebra, "friends")
 gfriend <- autographr(friends) + ggtitle("Friendship")
-
# let's focus on the 'social' attribute
+data-diagnostics="1" data-startover="1" data-lines="0"
+data-pipe="|>">
+
# now let's focus on the 'social' tie attribute
 social <- to_uniplex(ison_algebra, "social")
 gsocial <- autographr(social) + ggtitle("Social")
-
# let's focus on the 'tasks' attribute
+data-diagnostics="1" data-startover="1" data-lines="0"
+data-pipe="|>">
+
# and the 'tasks' tie attribute
 tasks <- to_uniplex(ison_algebra, "tasks")
 gtask <- autographr(tasks) + ggtitle("Task")
-
# now, let's compare the each attribute's graph, side-by-side
-gfriend + gsocial + gtask
+data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>"> +
# now, let's compare each attribute's graph, side-by-side
+gfriend + gsocial + gtask
+# if you get an error here, you may need to install and load
+# the package 'patchwork'.
+# It's highly recommended for assembling multiple plots together.
+# Otherwise you can just plot them separately on different lines.
+data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
friends <- to_uniplex(ison_algebra, "friends")
 gfriend <- autographr(friends) + ggtitle("Friendship")
 
@@ -225,10 +237,14 @@ 

Separating multiplex networks

tasks <- to_uniplex(ison_algebra, "tasks") gtask <- autographr(tasks) + ggtitle("Task") +# We now have three separate networks depicting each type of tie from the ison_algebra network: gfriend + gsocial + gtask

Note also that these are weighted networks. autographr() -automatically registers these different weights and plots them.

+automatically recognises these different weights and plots them. Where +useful (less dense directed networks), autographr() also +bends reciprocated arcs. What (else) can we say about these three +networks?

@@ -242,22 +258,25 @@

Density

as:

+data-lines="0" data-pipe="|>">
-
network_ties(tasks)/(network_nodes(tasks)*(network_nodes(tasks)-1))
+data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>"> +
# calculating network density manually according to equation
+network_ties(tasks)/(network_nodes(tasks)*(network_nodes(tasks)-1))

but we can also just use the {migraph} function…

+data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
+data-lines="0" data-pipe="|>">
network_density(tasks)

Note that the various measures in {migraph} print @@ -274,26 +293,37 @@

Density

Closure

-

Next let’s calculate reciprocity.

+

Next let’s calculate reciprocity in the task network. While +one could do this by hand, it’s more efficient to do this using the +{migraph} package. Can you guess the correct name of the +function?

+data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
-
network_reciprocity(tasks)
+data-lines="0" data-pipe="|>"> +
network_reciprocity(tasks)
+# this function calculates the amount of reciprocity in the whole network
-

And let’s calculate transitivity.

+

And let’s calculate transitivity in the task network. Again, +can you guess the correct name of this function?

+data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
-
network_transitivity(tasks)
+data-lines="0" data-pipe="|>"> +
network_transitivity(tasks)
+# this function calculates the amount of transitivity in the whole network
+

We have collected measures of the task network’s reciprocity and +transitivity, but we still need to interpret these measures. These +measures do not speak for themselves.

@@ -305,30 +335,47 @@

Closure

Components

-

Now let’s look at the friend network.

+

Now let’s look at the friendship network, ‘friends’. We’re interested +here in how many components there are. By default, the +network_components() function will return the number of +strong components for directed networks. For weak +components, you will need to first make the network undirected. Remember +the difference between weak and strong components?

+
+
+
+
+
+ +
+
+data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
-
network_components(friends)
+data-lines="0" data-pipe="|>"> +
network_components(friends)
+# note that friends is a directed network
+# you can see this by calling the object 'friends'
+# or by running `manynet::is_directed(friends)`
+data-lines="0" data-pipe="|>">
# Now let's look at the number of components for objects connected by an undirected edge
-# Note: to_undirected() returns an object that has any edge direction removed, so that 
-# any pair of nodes with at least one directed edge will be connected by an undirected edge 
-# in the new network.
-
+# Note: to_undirected() returns an object with all tie direction removed, 
+# so any pair of nodes with at least one directed edge 
+# will be connected by an undirected edge in the new network.
 network_components(to_undirected(friends))
-
network_components(friends)
+data-lines="0" data-pipe="|>">
+
# note that friends is a directed network
+network_components(friends)
 network_components(to_undirected(friends))
@@ -339,35 +386,51 @@

Components

-

We can use the membership vector in the resulting object to color -nodes:

+

So we know how many components there are, but maybe we’re also +interested in which nodes are members of which components? +node_components() returns a membership vector that can be +used to color nodes in autographr():

+data-lines="0" data-pipe="|>">
+data-lines="0" data-pipe="|>">
friends <- friends %>% 
   mutate(weak_comp = node_components(to_undirected(friends)),
-         strong_comp = node_components(friends))
+ strong_comp = node_components(friends)) +# node_components returns a vector of nodes' memberships to components in the network +# here, we are adding the nodes' membership to components as an attribute in the network +# alternatively, we can also use the function `add_node_attribute()` +# eg. `add_node_attribute(friends, "weak_comp", node_components(to_undirected(friends)))`
+data-lines="0" data-pipe="|>">
autographr(friends, node_color = "weak_comp") + ggtitle("Weak components") +
-autographr(friends, node_color = "strong_comp") + ggtitle("Strong components")
+autographr(friends, node_color = "strong_comp") + ggtitle("Strong components") +# by using the 'node_color' argument, we are telling autographr to colour +# the nodes in the graph according to the values of the 'weak_comp' attribute in the network
+data-lines="0" data-pipe="|>">
friends <- friends %>% 
   mutate(weak_comp = node_components(to_undirected(friends)),
          strong_comp = node_components(friends))
 autographr(friends, node_color = "weak_comp") + ggtitle("Weak components") +
 autographr(friends, node_color = "strong_comp") + ggtitle("Strong components")
+
+
+
+
+
+ +
+
@@ -375,36 +438,36 @@

Community Detection

Ok, the friendship network has 3-4 components, but how many ‘groups’ are there? Just visually, it looks like there are two denser clusters within the main component.

-

Today we’ll use the ‘friend’ subgraph for exploring community +

Today we’ll use the ‘friends’ subgraph for exploring community detection methods. For clarity and simplicity, we will concentrate on the main component (the so-called ‘giant’ component) and consider -friendship undirected:

+friendship undirected. Can you guess how to make these changes to the +‘friends’ network?

+data-lines="0" data-pipe="|>">
-
# let's use to_giant() which returns an object that includes only the main component without any smaller components or isolates
-
+data-lines="0" data-pipe="|>">
+
# to_giant() returns an object that includes only the main component without any smaller components or isolates
 (friends <- to_giant(friends))
+data-lines="0" data-pipe="|>">
(friends <- to_undirected(friends))
+data-lines="0" data-pipe="|>">
# now, let's graph the new network
 autographr(friends)
+data-lines="0" data-pipe="|>">
(friends <- to_giant(friends))
 (friends <- to_undirected(friends))
 autographr(friends)
@@ -412,14 +475,14 @@

Community Detection

Comparing friends before and after these operations, you’ll notice the number of ties decreases as reciprocated directed ties are consolidated into single undirected ties, and the number of nodes -decreases as the couple of isolates are removed.

+decreases as two isolates are removed.

There is no one single best community detection algorithm. Instead there are several, each with their strengths and weaknesses. Since this is a rather small network, we’ll focus on the following methods: -walktrap, edge betweenness, and fast greedy. {igraph} also -includes others though too; all are named cluster_… As you use them, -consider how they portray clusters and consider which one(s) afford a -sensible view of the social world as cohesively organized.

+walktrap, edge betweenness, and fast greedy. (Others are included in +{migraph}/{igraph}) As you use them, consider +how they portray communities and consider which one(s) afford a sensible +view of the social world as cohesively organized.

Walktrap

This algorithm detects communities through a series of short random @@ -450,12 +513,13 @@

Walktrap

So let’s try and get a community classification using the walktrap algorithm with path lengths of the random walks specified to be 50.

+data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
+data-lines="0" data-pipe="|>">
# let's use the node_walktrap()function to create a hierarchical, 
 # agglomerative algorithm based on random walks, and assign it to
 # an object
@@ -465,7 +529,7 @@ 

Walktrap

+data-lines="0" data-pipe="|>">
c(friend_wt)
 
 # This says that dividing the graph into 2 communities maximises modularity,
@@ -476,13 +540,13 @@ 

Walktrap

+data-lines="0" data-pipe="|>">
# resulting in a modularity of 
 network_modularity(friends, friend_wt)
+data-lines="0" data-pipe="|>">
friend_wt <- node_walktrap(friends, times=50)
 friend_wt # note that it prints pretty, but underlying it is just a vector:
 # c(friend_wt)
@@ -498,12 +562,13 @@ 

Walktrap

We can also visualise the clusters on the original network How does the following look? Plausible?

+data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
+data-lines="0" data-pipe="|>">
# plot 1: groups by node color
 
 friends <- friends %>% 
@@ -512,15 +577,15 @@ 

Walktrap

+data-lines="0" data-pipe="|>">
#plot 2: groups by borders
 
-# to be fancy, we could even draw the group borders around the nodes
+# to be fancy, we could even draw the group borders around the nodes using the node_group argument
 autographr(friends, node_group = "walk_comm")
+data-lines="0" data-pipe="|>">
# plot 3: group and node colors
 
 # or both!
@@ -528,15 +593,18 @@ 

Walktrap

node_color = "walk_comm", node_group = "walk_comm") + ggtitle("Walktrap", - subtitle = round(network_modularity(friends, friend_wt), 3))
+ subtitle = round(network_modularity(friends, friend_wt), 3)) +# the function `round()` rounds the values to a specified number of decimal places +# here, we are telling it to round the network_modularity score to 3 decimal places, +# but the score is exactly 0.27 so only two decimal places are printed.
+data-lines="0" data-pipe="|>">
friends <- friends %>% 
   mutate(walk_comm = friend_wt)
 autographr(friends, node_color = "walk_comm")
-# to be fancy, we could even draw the group borders around the nodes
+# to be fancy, we could even draw the group borders around the nodes using the node_group argument
 autographr(friends, node_group = "walk_comm")
 # or both!
 autographr(friends, 
@@ -546,7 +614,8 @@ 

Walktrap

subtitle = round(network_modularity(friends, friend_wt), 3))

This can be helpful when polygons overlap to better identify -membership Or use node color and size to indicate other attributes…

+membership Or you can use node color and size to indicate other +attributes…

Edge Betweenness

@@ -562,12 +631,13 @@

Edge Betweenness

The following works similarly to walktrap, but no need to set a step length.

+data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
+data-lines="0" data-pipe="|>">
friend_eb <- node_edge_betweenness(friends)
 friend_eb
@@ -579,12 +649,13 @@

Edge Betweenness

class="uri">http://jfaganuk.github.io/2015/01/24/basic-network-analysis/

To visualise the result:

+data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
+data-lines="0" data-pipe="|>">
# create an object
 
 friends <- friends %>% 
@@ -592,7 +663,7 @@ 

Edge Betweenness

+data-lines="0" data-pipe="|>">
# create a graph with a title and subtitle returning the modularity score
 
 autographr(friends, 
@@ -603,7 +674,7 @@ 

Edge Betweenness

+data-lines="0" data-pipe="|>">
friends <- friends %>% 
   mutate(eb_comm = friend_eb)
 autographr(friends, 
@@ -628,19 +699,20 @@ 

Fast Greedy

partitioning, although I personally find it both useful and in many cases quite “accurate”.

+data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
+data-lines="0" data-pipe="|>">
friend_fg <- node_fast_greedy(friends)
 friend_fg # Does this result in a different community partition?
 network_modularity(friends, friend_fg) # Compare this to the edge betweenness procedure
+data-lines="0" data-pipe="|>">
# Again, we can visualise these communities in different ways:
 friends <- friends %>% 
   mutate(fg_comm = friend_fg)
@@ -648,11 +720,12 @@ 

Fast Greedy

node_color = "fg_comm", node_group = "fg_comm") + ggtitle("Fast-greedy", - subtitle = round(network_modularity(friends, friend_fg), 3))
+ subtitle = round(network_modularity(friends, friend_fg), 3)) +#
+data-lines="0" data-pipe="|>">
friend_fg <- node_fast_greedy(friends)
 friend_fg # Does this result in a different community partition?
 network_modularity(friends, friend_fg) # Compare this to the edge betweenness procedure
@@ -669,38 +742,45 @@ 

Fast Greedy

See A Clauset, MEJ Newman, C Moore: Finding community structure in very large networks, https://arxiv.org/abs/cond-mat/0408187

+
+
+
+
+
+ +
+

Two-mode network: Southern women

-

The next dataset is also available in migraph. Let’s take a look at -the loaded objects.

+

The next dataset, ‘ison_southern_women’, is also available in +{manynet}. Let’s load and graph the data.

+data-lines="0" data-pipe="|>">
+data-lines="0" data-pipe="|>">
# let's load the data and analyze it
 data("ison_southern_women")
 ison_southern_women
+data-lines="0" data-pipe="|>">
autographr(ison_southern_women, node_color = "type")
 autographr(ison_southern_women, "railway", node_color = "type")
+data-lines="0" data-pipe="|>">
data("ison_southern_women")
 ison_southern_women
-autographr(ison_southern_women, node_color = "type")
-autographr(ison_southern_women, "railway", node_color = "type")
+autographr(ison_southern_women, node_color = "type")
@@ -709,35 +789,40 @@ Project two-mode network into two one-mode networks

that, we can obtain a ‘projection’ of the two-mode network. There are two ways of doing this. The hard way…
twomode_matrix <- as_matrix(ison_southern_women)
 women_matrix <- twomode_matrix %*% t(twomode_matrix)
 event_matrix <- t(twomode_matrix) %*% twomode_matrix
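These two products work because, for a two-mode incidence matrix $A$ with women as rows and events as columns,

$$P_{\text{women}} = A A^\top, \qquad P_{\text{events}} = A^\top A,$$

so each off-diagonal entry of $P_{\text{women}}$ counts the events that pair of women co-attended, and each diagonal entry counts a node's total memberships.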
-Or the easy way
+Or the easy way:
# women-graph
 # to_mode1(): Results in a weighted one-mode object that retains the row nodes from
 # a two-mode object, and weights the ties between them on the basis of their joint
 # ties to nodes in the second mode (columns)
 
 women_graph <- to_mode1(ison_southern_women)
-autographr(women_graph)
+autographr(women_graph)
+
+# note that projection `to_mode1` involves keeping one type of node
+# this is different from to_uniplex above, which keeps one type of tie in the network
# event-graph
 # to_mode2(): Results in a weighted one-mode object that retains the column nodes from
 # a two-mode object, and weights the ties between them on the basis of their joint ties
@@ -748,21 +833,22 @@ Project two-mode network into two one-mode networks

women_graph <- to_mode1(ison_southern_women)
 autographr(women_graph)
 event_graph <- to_mode2(ison_southern_women)
 autographr(event_graph)
-{migraph} also includes several other options for how to construct the projection. Please see the help file for more details.
+{manynet} also includes several other options for how to construct the projection. Please see the help file for more details.
autographr(to_mode2(ison_southern_women, similarity = "jaccard")) + ggtitle("Jaccard") +
 autographr(to_mode2(ison_southern_women, similarity = "rand")) + ggtitle("Rand") +
 autographr(to_mode2(ison_southern_women, similarity = "pearson")) + ggtitle("Pearson") +
@@ -772,19 +858,21 @@ Project two-mode network into two one-mode networks

question of cohesion.

# network_equivalency(): Calculate equivalence or reinforcement in a (usually two-mode) network
 
 network_equivalency(ison_southern_women)
# network_transitivity(): Calculate transitivity in a network
 
 network_transitivity(women_graph)
@@ -792,27 +880,20 @@ Project two-mode network into two one-mode networks

network_equivalency(ison_southern_women)
 network_transitivity(women_graph)
 network_transitivity(event_graph)

What do we learn from this?
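One way to see the issue concretely is a toy example (a sketch in base R and `{igraph}`, not part of the tutorial's own code): a single event attended by three women projects to a complete triangle, so transitivity appears maximal even though no woman chose a tie to another.

```r
library(igraph)

# One event attended by three women: a 3 x 1 incidence matrix
A <- matrix(1, nrow = 3, ncol = 1)
P <- A %*% t(A)   # project onto the women
diag(P) <- 0      # drop self-ties
g <- graph_from_adjacency_matrix(P, mode = "undirected")
transitivity(g)   # returns 1: a closed triangle created by the projection alone
```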


Task/Unit Test

  1. Produce a plot comparing 3 community detection procedures used here
-on a (women) projection of the ison_southern_women dataset. Identify
+on a (women) projection of the ‘ison_southern_women’ dataset. Identify
which you prefer, and explain why.
  2. Explain in no more than a paragraph why projection can lead to misleading transitivity measures.
  3. … lead to group identity.
diff --git a/inst/tutorials/tutorial5/equivalence.Rmd b/inst/tutorials/tutorial5/position.Rmd
similarity index 53%
rename from inst/tutorials/tutorial5/equivalence.Rmd
rename to inst/tutorials/tutorial5/position.Rmd
index 27452949..e665d524 100644
--- a/inst/tutorials/tutorial5/equivalence.Rmd
+++ b/inst/tutorials/tutorial5/position.Rmd
@@ -1,5 +1,5 @@
 ---
-title: "Equivalence"
+title: "Position"
 author: "by James Hollway"
 output: 
   learnr::tutorial:
@@ -18,72 +18,68 @@ knitr::opts_chunk$set(echo = FALSE)
 
 ## Setting up
 
-The data we're going to use here is included in the `{manynet}` package.
-This dataset is multiplex, meaning that it contains
-several different types of ties:
-friendship, social and task interactions.
+For this session, we're going to use the "ison_algebra" dataset included in the `{manynet}` package.
+Do you remember how to call the data?
+Can you find out some more information about it via its help file?
 
-```{r data, exercise = TRUE}
+```{r data, exercise = TRUE, purl = FALSE}
 
 ```
 
-```{r data-hint}
+```{r data-hint-1, purl = FALSE}
 # Let's call and load the 'ison_algebra' dataset
 data("ison_algebra", package = "manynet")
+# Or you can retrieve it like this:
+ison_algebra <- manynet::ison_algebra
+```
 
+```{r data-hint-2, purl = FALSE}
 # If you want to learn more about the 'ison_algebra' dataset, use the following function (below)
 ?manynet::ison_algebra
 ```
 
 ```{r data-solution}
 data("ison_algebra", package = "manynet")
+?manynet::ison_algebra
+# If you want to see the network object, you can run the name of the object
+# ison_algebra
+# or print the code with brackets at the front and end of the code
+# (ison_algebra <- manynet::ison_algebra)
 ```
 
-Note that you do not need to load the package using `library()` to get the data.
-Now you know how to create new matrices in R, load .csv files,
-saved .RData files, and data from packages!
+We can see that the dataset is multiplex,
+meaning that it contains several different types of ties:
+friendship (friends), social (social) and task interactions (tasks).
 
 ### Separating multiplex networks
 
 As a multiplex network, there are actually three different types of ties in this network.
-We can extract them and investigate them separately using `to_uniplex()`:
+We can extract them and investigate them separately using `to_uniplex()`.
+Within the parentheses, put the multiplex object's name,
+and then as a second argument put the name of the tie attribute in quotation marks.
+Once you have extracted all three networks,
+graph them and add a descriptive title.
-```
-
-```{r separatingnets-hint-1}
-# Let's focus on the 'friends' attribute
+```{r separatingnets, exercise=TRUE, exercise.setup = "data", purl = FALSE}
 
-friends <- to_uniplex(ison_algebra, "friends")
-gfriend <- autographr(friends) + ggtitle("Friendship")
 ```
 
-```{r separatingnets-hint-2}
-# Let's focus on the 'social' attribute
-
-social <- to_uniplex(ison_algebra, "social")
-gsocial <- autographr(social) + ggtitle("Social")
+```{r separatingnets-hint-1, purl = FALSE}
+# Here's the basic idea/code syntax you will need to extract each type of network
+# You will want to replace the blanks
+____ <- to_uniplex(ison_algebra, _____)
 ```
 
-```{r separatingnets-hint-3}
-# Let's focus on the 'tasks' attribute
-
-tasks <- to_uniplex(ison_algebra, "tasks")
-gtask <- autographr(tasks) + ggtitle("Task")
-```
-
-```{r separatingnets-hint-4}
+```{r separatingnets-hint-4, purl = FALSE}
 # Now, let's compare each attribute's graph, side-by-side by using "+"
 # Note: using "/" after each graph will order them vertically; however, it might not be the best way
-
+# See for example:
+gfriend <- autographr(friends) + ggtitle("Friendship")
 gfriend + gsocial + gtask
 ```
 
 ```{r separatingnets-solution}
-# Your code should look like this:
-
 friends <- to_uniplex(ison_algebra, "friends")
 gfriend <- autographr(friends) + ggtitle("Friendship")
@@ -97,56 +93,110 @@ gfriend + gsocial + gtask
 ```
 
 Note also that these are weighted networks.
-`autographr()` automatically registers these different weights and plots them.
+`autographr()` automatically recognises these different weights and plots them.
+
+```{r strongties-qa, echo=FALSE, purl = FALSE}
+question("If we interpret ties with higher weights as strong ties, and lesser weights as weak ties, then, according to network theory, where would we expect novel information to come from?",
+         answer("Weak ties",
+                correct = TRUE,
+                message = learnr::random_praise()),
+         answer("Strong ties",
+                message = learnr::random_encouragement()),
+         answer("Isolates",
+                message = learnr::random_encouragement()),
+         answer("Highest degree nodes",
+                message = learnr::random_encouragement()),
+         random_answer_order = TRUE,
+         allow_retry = TRUE
+)
+```
 
 ## Structural Holes and Constraint
 
-Where might innovation be most likely to occur in these networks?
-Let's take a look at which actors are least constrained
+Our first question for this network is where innovation and creative ideas
+might be expected to appear.
+
+```{r structinnov-qa, echo=FALSE, purl = FALSE}
+question("Which network concepts are associated with innovation?",
+         answer("Structural holes",
+                correct = TRUE,
+                message = learnr::random_praise()),
+         answer("Structural folds",
+                correct = TRUE),
+         answer("Structural balance",
+                message = learnr::random_encouragement()),
+         answer("Structural equivalence",
+                message = learnr::random_encouragement()),
+         answer("Structuralism",
+                message = learnr::random_encouragement()),
+         random_answer_order = TRUE,
+         allow_retry = TRUE
+)
+```
+
+### Measuring structural holes
+
+```{r shmeasures-qa, echo=FALSE, purl = FALSE}
+question("There are a number of measures that might be used to approximate the concept of structural holes.
Select all that apply.", + answer("Constraint", + correct = TRUE, + message = learnr::random_praise()), + answer("Effective size", + correct = TRUE), + answer("Bridges", + correct = TRUE), + answer("Redundancy", + correct = TRUE), + answer("Efficiency", + correct = TRUE), + answer("Hierarchy", + correct = TRUE), + random_answer_order = TRUE, + allow_retry = TRUE +) +``` + +Let's take a look at which actors are least _constrained_ by their position in the *task* network to begin with. `{migraph}` makes this easy enough with the `node_constraint()` function. -```{r objects-setup} -friends <- to_uniplex(ison_algebra, "friends") -social <- to_uniplex(ison_algebra, "social") -tasks <- to_uniplex(ison_algebra, "tasks") +```{r objects-setup, purl=FALSE} +alge <- to_named(ison_algebra) +friends <- to_uniplex(alge, "friends") +social <- to_uniplex(alge, "social") +tasks <- to_uniplex(alge, "tasks") ``` -```{r constraint, exercise = TRUE, exercise.setup = "objects-setup"} +```{r constraint, exercise = TRUE, exercise.setup = "objects-setup", purl = FALSE} ``` -```{r constraint-hint} +```{r constraint-hint, purl = FALSE} node_constraint(____) - -# Don't forget we want to look at which actors are least constrained by their position -# in the 'tasks' network +# Don't forget we want to look at which actors are least constrained by their position in the 'tasks' network ``` ```{r constraint-solution} node_constraint(tasks) ``` -We see that this function returns a vector of -constraint scores that can range between 0 and 1. -Let's size the nodes according to this score, -and identify the node with the minimum constraint score. +This function returns a vector of constraint scores that can range between 0 and 1. +Let's graph the network again, sizing the nodes according to this score. +We can also identify the node with the minimum constraint score using `node_is_min()`. -```{r constraintplot, exercise=TRUE, exercise.setup = "objects-setup"} +```{r constraintplot, exercise=TRUE, exercise.setup = "objects-setup", purl = FALSE} ``` -```{r constraintplot-hint-1} -tasks <- tasks %>% mutate(low_constraint = node_is_min(node_constraint(____))) +```{r constraintplot-hint-1, purl = FALSE} +tasks <- tasks %>% + mutate(constraint = node_constraint(____), + low_constraint = node_is_min(node_constraint(____))) # Don't forget, we are still looking at the 'tasks' network ``` -```{r constraintplot-hint-2} -tasks <- tasks %>% mutate(low_constraint = node_is_min(node_constraint(tasks))) -``` - -```{r constraintplot-hint-3} +```{r constraintplot-hint-3, purl = FALSE} # Now, let's graph the network # Note 1: we are looking at the 'tasks' network # Note 2: we are interested in the actors 'least constrained' by their position @@ -154,31 +204,49 @@ tasks <- tasks %>% mutate(low_constraint = node_is_min(node_constraint(tasks))) autographr(____, node_color = "____") ``` -```{r constraintplot-hint-4} -autographr(tasks, node_color = "low_constraint") +```{r constraintplot-hint-4, purl = FALSE} +autographr(tasks, node_size = "constraint", node_color = "low_constraint") ``` ```{r constraintplot-solution} -# Your code should look like this: - -tasks <- tasks %>% mutate(low_constraint = node_is_min(node_constraint(tasks))) -autographr(tasks, node_color = "low_constraint") +tasks <- tasks %>% + mutate(constraint = node_constraint(tasks), + low_constraint = node_is_min(node_constraint(tasks))) +autographr(tasks, node_size = "constraint", node_color = "low_constraint") ``` -Why minimum? And what can we learn from this plot +Why minimum? 
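Assuming `node_constraint()` follows Burt's (1992) formulation (an assumption about its internals), the constraint of node $i$ aggregates, over each partner $j$, how much of $i$'s network investment is concentrated in $j$, directly or through shared partners $q$:

$$c_{ij} = \left(p_{ij} + \sum_{q \neq i,j} p_{iq}\, p_{qj}\right)^2, \qquad C_i = \sum_{j \neq i} c_{ij},$$

where $p_{ij}$ is the proportion of $i$'s ties (or tie weight) invested in $j$. Lower $C_i$ means more disconnected partners, and so more structural holes to broker.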
 
 ## Structural Equivalence
 
-Now we are going to identify and interpret the roles
-or relations between a set of structurally equivalent positions.
+Next we might ask ourselves what (other) roles there are in the network.
+We want to know who plays what role in this algebra class.
+Let us begin with structural equivalence.
+
+```{r equiv-qa, echo=FALSE, purl = FALSE}
+question("Structural equivalence means identifying classes of nodes with...",
+         answer("same/similar tie partners.",
+                correct = TRUE,
+                message = learnr::random_praise()),
+         answer("same/similar pattern of ties.",
+                message = "This is the definition for regular equivalence."),
+         answer("same/similar distance from all others.",
+                message = "This is the definition for automorphic equivalence.")
+)
+```
+
 We're going to identify structurally equivalent positions
 across all the data that we have,
 including 'task', 'social', and 'friend' ties.
+So that is, we are using the multiplex `ison_algebra` dataset again and not
+a uniplex subgraph thereof.
 
 ### Finding structurally equivalent classes
 
 In `{migraph}`, finding how the nodes of a network can be partitioned
-into structurally equivalent classes is as easy as:
+into structurally equivalent classes can be as easy as:
 
 ```{r find-se, exercise = TRUE, exercise.setup = "data"}
 node_structural_equivalence(ison_algebra)
 
 ison_algebra %>% 
@@ -189,6 +257,8 @@ ison_algebra %>%
 ```
 
 But actually, a lot is going on behind the scenes here that we can unpack.
+Understanding what is going on behind the scenes is important for understanding
+how these classes are identified and how to interpret them.
 
 ### Step one: starting with a census
 
@@ -196,33 +266,29 @@ All equivalence classes are based on nodes' similarity across some profile of mo
 In `{migraph}`, we call these motif *censuses*.
 Any kind of census can be used, and `{migraph}` includes a few options,
 but `node_structural_equivalence()` is based off of the census of all the nodes' ties,
-both outgoing and incoming ties, to reveal their relationships to tie partners.
+both outgoing and incoming ties, to characterise their relationships to tie partners.
 
-```{r construct-cor, exercise = TRUE, exercise.setup = "data"}
+```{r construct-cor, exercise = TRUE, exercise.setup = "data", purl = FALSE}
 
 ```
 
-```{r construct-cor-hint-1}
+```{r construct-cor-hint-1, purl = FALSE}
 # Let's use the node_tie_census() function
 # The function accepts an object such as a dataset
 # Hint: Which dataset are we using in this tutorial?
-
 node_tie_census(____)
 ```
 
-```{r construct-cor-hint-2}
+```{r construct-cor-hint-2, purl = FALSE}
 node_tie_census(ison_algebra)
 ```
 
-```{r construct-cor-hint-3}
+```{r construct-cor-hint-3, purl = FALSE}
 # Now, let's get the dimensions of an object via the dim() function
-
 dim(node_tie_census(ison_algebra))
 ```
 
 ```{r construct-cor-solution}
-# Your code should look like this:
-
 node_tie_census(ison_algebra)
 dim(node_tie_census(ison_algebra))
 ```
@@ -234,31 +300,40 @@ our 16 nodes might have across these three networks.
 Note also that the result is a weighted matrix;
 what would you do if you wanted it to be binary?
-```{r construct-binary, exercise = TRUE, exercise.setup = "data"}
-# THIS IS A SUGGESTION
-# PERHAPS WE CAN MAKE THIS ANOTHER EXERCISE OR MCQ
+```{r construct-binary, exercise = TRUE, exercise.setup = "data", purl = FALSE}
 
 ```
 
-```{r construct-binary-solution}
+```{r construct-binary-hint, purl = FALSE}
+# we could convert the result using as.matrix, returning the ties
+as.matrix((node_tie_census(ison_algebra)>0)+0)
+```
 
+```{r construct-binary-solution}
+# But it's easier to simplify the network by removing the classification into different types of ties.
+# Note that this also reduces the total number of possible paths between nodes
+ison_algebra %>%
+  select_ties(-c(friends, social, tasks)) %>%
+  node_tie_census()
 ```
 
 Note that `node_tie_census()` does not need to be passed to `node_structural_equivalence()` ---
-this is done automatically ---
-but the more generic `node_equivalence()` can be used with whichever tie census is desired.
+this is done automatically!
+However, the more generic `node_equivalence()` is available and can be used with whichever tie census is desired.
 Feel free to explore using some of the other censuses available in `{migraph}`,
 though some common ones are already used in the other equivalence convenience functions,
-`node_regular_equivalence()` and `node_automorphic_equivalence()`.
+e.g. `node_triad_census()` in `node_regular_equivalence()`
+and `node_path_census()` in `node_automorphic_equivalence()`.
 
 ### Step two: growing a tree of similarity
 
-The next part is all done internally,
-though there are several important parameters that can be set to obtain different results.
+The next part takes this census and creates a dendrogram based on distance or dissimilarity among the nodes' census profiles.
+This is all done internally within e.g. `node_structural_equivalence()`,
+though there are two important parameters that can be set to obtain different results.
 
-There are two main parameters that can be set here.
 First, users can set the type of distance measure used.
-This is passed on to `stats::dist()`,
+For enthusiasts, this is passed on to `stats::dist()`,
 so that help page should be consulted for more details.
 By default `"euclidean"` is used.
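For intuition about this choice: over two nodes' census profiles $x$ and $y$,

$$d_{\text{euclidean}}(x, y) = \sqrt{\sum_k (x_k - y_k)^2}, \qquad d_{\text{manhattan}}(x, y) = \sum_k \lvert x_k - y_k \rvert,$$

so Euclidean distance is dominated by the largest single differences between tie profiles, while Manhattan distance accumulates many small differences more evenly.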
@@ -271,20 +346,22 @@ We can see the difference from varying the clustering algorithm
 and/or distance by plotting the dendrograms (hidden)
 in the output from `node_structural_equivalence()`:
 
 ```{r varyclust, exercise = TRUE, exercise.setup = "data"}
+alge <- to_named(ison_algebra) # fake names to make comparison clearer
+plot(node_structural_equivalence(alge,
+                                 cluster = "hier", distance = "euclidean"))
-```
-
-```{r varyclust-solution}
-plot(node_structural_equivalence(ison_algebra, cluster = "hier", distance = "euclidean"))
-
-plot(node_structural_equivalence(ison_algebra, cluster = "hier", distance = "manhattan"))
+# changing the type of distance used
+plot(node_structural_equivalence(alge,
+                                 cluster = "hier", distance = "manhattan"))
-plot(node_structural_equivalence(ison_algebra, cluster = "concor"))
+# changing the clustering algorithm
+plot(node_structural_equivalence(alge,
+                                 cluster = "concor", distance = "euclidean"))
 ```
 
-```{r scale-interp, echo = FALSE}
+```{r scale-interp, echo = FALSE, purl = FALSE}
 question("Do you see any differences?",
-         answer("Yes", correct = TRUE),
+         answer("Yes", correct = TRUE, message = learnr::random_praise()),
          answer("No"),
          allow_retry = TRUE)
 ```
@@ -297,14 +374,14 @@ Basically, as we move to the right,
 we're allowing for more and more dissimilarity among those we cluster together.
 A fork or branching point indicates the level of dissimilarity
 at which those two or more nodes would be said to be equivalent.
-Where two nodes' branches join/fork is the distance between them,
+Where two nodes' branches join/fork represents the maximum distance among all their leaves,
 so more similar nodes' branches fork closer to the tree's canopy,
-and less similar (groups of) nodes don't join until basically they form a trunk.
+and less similar (groups of) nodes don't join until they form basically the trunk.
 Note that with the results using the hierarchical clustering algorithm,
 the distance directly affects the structure of the tree (and the results).
 
-The CONCOR dendrogram is a bit different though.
+The CONCOR dendrogram operates a bit differently to hierarchical clustering though.
 Instead it represents how converging correlations
 repeatedly bifurcate the nodes into one of two partitions.
 As such the 'distance' is really just the (inverse) number of steps
@@ -327,36 +404,22 @@ Remember, the further to the right the red line is,
 the more dissimilar we're allowing nodes in the same cluster to be.
 We could set this ourselves by just passing `k` an integer.
 
-```{r k-discrete, exercise = TRUE, exercise.setup = "data"}
-
-```
-
-```{r k-discrete-hint-1}
-# Let's use the node_structural_equivalence() function and set 'k' to 2
-node_structural_equivalence(____, k = ____)
-```
-
-```{r k-discrete-hint-2}
-# Don't forget to plot the dendrogram using the plot() function
-node_structural_equivalence(ison_algebra, k = 2)
-```
-
-```{r k-discrete-solution}
-plot(node_structural_equivalence(ison_algebra, k = 2))
+```{r k-discrete, exercise = TRUE, exercise.setup = "varyclust"}
+plot(node_structural_equivalence(alge, k = 2))
 ```
 
 But we're really just guessing. Maybe 2 is not the best `k`?
-To establish that, we need to iterate through a number of potential `k`,
+To establish what the best `k` is for this clustering exercise,
+we need to iterate through a number of potential `k`
 and consider their fitness by some metric.
 There are a couple of options here.
-One is to consider, for each `k`, how correlated this partition
-is with the observed network.
-When there is one cluster for each vertex in the network, cell values will be
-identical to the observed correlation matrix, and when there is one cluster
-for the whole network, the values will all be equal to the average correlation
+One is to consider, for each `k`,
+how correlated this partition is with the observed network.
+When there is one cluster for each vertex in the network,
+cell values will be identical to the observed correlation matrix,
+and when there is one cluster for the whole network,
+the values will all be equal to the average correlation
 across the observed matrix.
 So the correlations in each by-cluster matrix are correlated with the observed
 correlation matrix to see how well each by-cluster matrix fits the data.
@@ -385,32 +448,11 @@ which is what `k = "silhouette"` does,
 can return a somewhat different result to the elbow method.
 See what we have here, with all other arguments held the same:
 
-```{r elbowsil, exercise = TRUE, exercise.setup = "data"}
-
-```
-
-```{r elbowsil-hint-1}
-# Let's recall the node_structural_equivalence() function we used in the previous example
-plot(node_structural_equivalence(____, k = "____"))
-```
-
-```{r elbowsil-hint-2}
-# Now, instead of looking at 2 clusters, let's look at 'elbow'
-plot(node_structural_equivalence(ison_algebra, k = "elbow"))
-```
-
-```{r elbowsil-hint-3}
-# Now, let's look at 'silhouette'
-plot(node_structural_equivalence(ison_algebra, k = "____"))
-```
-
-```{r elbowsil-solution}
-plot(node_structural_equivalence(ison_algebra, k = "elbow"))
-plot(node_structural_equivalence(ison_algebra, k = "silhouette"))
+```{r elbowsil, exercise = TRUE, exercise.setup = "varyclust"}
+plot(node_structural_equivalence(alge,
+                                 k = "elbow"))
+plot(node_structural_equivalence(alge,
+                                 k = "silhouette"))
 ```
 
 Ok, so it looks like the elbow method returns `k == 3` as a good trade-off
@@ -418,8 +460,8 @@ between fit and parsimony.
 The silhouette method, by contrast,
 sees `k == 4` as maximising cluster similarity and dissimilarity.
 Either is probably fine here,
-and there is much debate around how to select the number of clusters anyway,
-but the silhouette method seems to do a better job of identifying how unique
+and there is much debate around how to select the number of clusters anyway.
+However, the silhouette method seems to do a better job of identifying how unique
 the 16th node is.
 The silhouette method is also the default in `{migraph}`.
@@ -431,23 +473,13 @@ This however can be modified to be higher or lower,
 e.g. `range = 16`.
 Finally, one last option is `k = "strict"`,
 which only assigns nodes to the same partition
-if there is a distance of zero between them.
+if there is zero distance between them.
 This is a quick and rigorous solution; however, oftentimes this misses the point
 in finding clusters of nodes that, despite some variation,
 can be considered as similar on some dimension.
 
-```{r strict, exercise = TRUE, exercise.setup = "data"}
-
-```
-
-```{r strict-hint}
-# Again, let's use the same function as those used above and change 'k'
-plot(node_structural_equivalence(ison_algebra, k = "____"))
-```
-
-```{r strict-solution}
-plot(node_structural_equivalence(ison_algebra, k = "strict"))
+```{r strict, exercise = TRUE, exercise.setup = "varyclust"}
+plot(node_structural_equivalence(alge, k = "strict"))
 ```
 
 Here for example, no two nodes have precisely the same tie-profile,
@@ -464,70 +496,55 @@ but here it essentially just reports nodes' identity.
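For reference, the silhouette method discussed above conventionally scores each node as

$$s(i) = \frac{b(i) - a(i)}{\max\{a(i), b(i)\}},$$

where $a(i)$ is the mean distance from $i$ to the other members of its own cluster and $b(i)$ is the mean distance from $i$ to the nearest other cluster, and the chosen $k$ maximises the average $s(i)$; this states the standard definition, not necessarily `{migraph}`'s exact implementation.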
 Ok, so now we have a result from establishing nodes' membership in structurally equivalent classes.
 We can graph this of course, as above:
 
-```{r strplot, exercise = TRUE, exercise.setup = "data"}
-str_clu <- node_structural_equivalence(ison_algebra)
-
-ison_algebra %>% 
-  mutate(se = str_clu) %>% 
+```{r strplot, exercise = TRUE, exercise.setup = "varyclust"}
+alge %>% 
+  mutate(se = node_structural_equivalence(alge)) %>% 
   autographr(node_color = "se")
 ```
 
-While this plot enters the class information in to our earlier graph,
-it doesn't always help us understand how the classes vary.
+While this plot adds the structurally equivalent classes information to our earlier graph,
+it doesn't really help us understand how the classes relate.
+That is, we might be less interested in how the individuals in the different classes relate,
+and more interested in how the different classes relate in aggregate.
 One option that can be useful for characterising what the profile of ties (partners)
 is for each position/equivalence class
 is to use `summary()`.
-Used on a `node_motif` object, it also expects some membership vector,
-and then it summarises the census by the partition assignment.
+It summarises some census result by a partition (equivalence/membership) assignment.
 By default it takes the average of ties (values),
 but this can be tweaked by assigning some other summary statistic as `FUN = `.
 
-```{r summ, exercise = TRUE, exercise.setup = "strplot"}
-
-```
-
-```{r summ-hint-1}
-#Let's bring the node_tie_census() function from Step 1
-node_tie_census(ison_algebra)
-```
-
-```{r summ-hint-2}
-# Now, let's put it inside the summary() function
-# For a description of summary(), use ?summary()
-summary(node_tie_census(ison_algebra))
-```
-
-```{r summ-hint-3}
-# Great, but we are interested in looking at the structural membership of clusters
-# Let's call the object we created a few steps ago: str_clu
-summary(node_tie_census(ison_algebra),
+```{r summ, exercise = TRUE, exercise.setup = "strplot", purl = FALSE}
+
+```
+
+```{r summ-hint, purl = FALSE}
+# Let's wrap node_tie_census inside the summary() function
+# and pass it a membership result
+summary(node_tie_census(____),
         membership = ____)
 ```
 
 ```{r summ-solution}
-summary(node_tie_census(ison_algebra),
-        membership = str_clu)
+summary(node_tie_census(alge),
+        membership = node_structural_equivalence(alge))
 ```
 
-Since this node census produces 96 columns,
+This node census produces 96 columns,
 $16~\text{nodes} \times 2~\text{directions} \times 3~\text{edge types}$,
 it takes a bit to look through what varies
 between the different classes as 'blocked'.
+But only four rows (the four structurally equivalent classes, according to the default).
 Another way to do this is to plot the blockmodel as a whole.
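A common way to reduce such a blocked matrix (the usual density blockmodel, stated here as background rather than as `{migraph}`'s exact computation) is to report, for classes $r$ and $s$, the block density

$$B_{rs} = \frac{1}{n_r n_s}\sum_{i \in r}\sum_{j \in s} A_{ij},$$

with $n_r(n_r - 1)$ in the denominator for diagonal blocks so that self-ties are excluded; the sorted, framed plot below lets you eyeball exactly these within- and between-block densities.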
 Passing the `plot()` function an adjacency/incidence matrix
 along with a membership vector
 allows the matrix to be sorted and framed
 (without the membership vector, just the adjacency/incidence matrix is plotted):
 
-```{r block, exercise = TRUE, exercise.setup = "strplot"}
+```{r block, exercise = TRUE, exercise.setup = "strplot", purl = FALSE}
 
 ```
 
-```{r block-hint}
+```{r block-hint, purl = FALSE}
 # Let's plot the blockmodel using the plot() function we used for the dendrograms
 # Instead of node_tie_census(), let's use as_matrix()
@@ -536,11 +553,14 @@ plot(as_matrix(____),
      membership = ____)
 ```
 
 ```{r block-solution}
-plot(as_matrix(ison_algebra),
-     membership = str_clu)
+plot(as_matrix(alge),
+     membership = node_structural_equivalence(alge))
 ```
 
-So, with this information, we might characterise them like so:
+By passing our structural equivalence results to the membership argument,
+the matrix is re-sorted to cluster or 'block' nodes from the same class together.
+This can help us interpret the general relationships between classes.
+For example, we might characterise them like so:
 
 - The first group work together only in reciprocal pairs on tasks,
   preferring to approach the nerd but also those of the other two roles.
@@ -568,33 +588,10 @@ Of course, this means that there can be self-ties or loops,
 because even if the original network was simple (not complex),
 any within-class ties will end up becoming loops and thus the network will be complex.
 
-```{r structblock, exercise = TRUE, exercise.setup = "data"}
-str_clu <- node_structural_equivalence(ison_algebra)
-(bm <- to_blocks(ison_algebra, str_clu))
+```{r structblock, exercise = TRUE, exercise.setup = "varyclust", warning=FALSE}
+(bm <- to_blocks(alge, node_structural_equivalence(alge)))
 
-bm <- bm %>% as_tidygraph %>% mutate(name = c("Freaks", "Squares", "Nerds", "Geek"))
+bm <- bm %>% as_tidygraph %>% 
+  mutate(name = c("Freaks", "Squares", "Nerds", "Geek"))
 autographr(bm)
 ```
-
-## Unit Test
-
-1. Plot labelled, reduced graph of STRUCTURALLY equivalent classes
-on the `mpn_elite_usa_advice` network and interpret
-
-```{r struct-elites, exercise = TRUE}
-
-```
-
-2. Plot labelled, reduced graph of REGULARLY equivalent classes
-on the `mpn_elite_usa_advice` network and interpret
-
-```{r reg-elites, exercise = TRUE}
-
-```
-
-3. Plot labelled, reduced graph of AUTOMORPHICALLY equivalent classes
-on the `mpn_elite_usa_advice` network only and interpret
-
-```{r auto-elites, exercise = TRUE}
-
-```
diff --git a/inst/tutorials/tutorial5/equivalence.html b/inst/tutorials/tutorial5/position.html
similarity index 69%
rename from inst/tutorials/tutorial5/equivalence.html
rename to inst/tutorials/tutorial5/position.html
index 9d05e785..1d149cd2 100644
--- a/inst/tutorials/tutorial5/equivalence.html
+++ b/inst/tutorials/tutorial5/position.html
@@ -13,9 +13,9 @@

-Equivalence
+Position

@@ -112,32 +112,41 @@

Setting up

-The data we’re going to use here is included in the {manynet} package. This dataset is multiplex, meaning that it contains several different types of ties: friendship, social and task interactions.
+The data we’re going to use here, “ison_algebra”, is included in the {manynet} package. Do you remember how to call the data? Can you find out some more information about it?
# Let's call and load the 'ison_algebra' dataset
 data("ison_algebra", package = "manynet")
-# If you want to learn more about the 'ison_algebra' dataset, use the following function (below)
+# Or you can retrieve it like this:
+ison_algebra <- manynet::ison_algebra

+# If you want to learn more about the 'ison_algebra' dataset, use the following function (below)
 ?manynet::ison_algebra

-Note that you do not need to load the package using library() to get the data. Now you know how to create new matrices in R, load .csv files, saved .RData files, and data from packages!
data("ison_algebra", package = "manynet")
+?manynet::ison_algebra
+# If you want to see the network object, you can run the name of the object
+ison_algebra
+# or print the code with brackets at the front and end of the code
+(ison_algebra <- manynet::ison_algebra)

+We can see after printing the object that the dataset is multiplex, meaning that it contains several different types of ties: friendship (friends), social (social) and task interactions (tasks).

Separating multiplex networks

As a multiplex network, there are actually three different types of

@@ -145,12 +154,13 @@ Separating multiplex networks

separately using to_uniplex():
# Let's focus on the 'friends' attribute
 
 friends <- to_uniplex(ison_algebra, "friends")
@@ -158,7 +168,8 @@ Separating multiplex networks

# Let's focus on the 'social' attribute
 
 social <- to_uniplex(ison_algebra, "social")
@@ -166,7 +177,8 @@ Separating multiplex networks

# Let's focus on the 'tasks' attribute
 
 tasks <- to_uniplex(ison_algebra, "tasks")
@@ -174,7 +186,8 @@ Separating multiplex networks

# Now, let's compare each attribute's graph, side-by-side by using "+"
 # Note: using "/" after each graph will order them vertically; however, it might not be the best way
 
@@ -182,7 +195,8 @@ Separating multiplex networks

# Your code should look like this:
 
 friends <- to_uniplex(ison_algebra, "friends")
@@ -210,12 +224,12 @@ Structural Holes and Constraint

node_constraint() function.

node_constraint(____)
 
 # Don't forget we want to look at which actors are least constrained by their position 
@@ -223,7 +237,7 @@ Structural Holes and Constraint

node_constraint(tasks)

We see that this function returns a vector of constraint scores that

@@ -231,24 +245,27 @@ Structural Holes and Constraint

and identify the node with the minimum constraint score.
tasks <- tasks %>% mutate(low_constraint = node_is_min(node_constraint(____)))
 
 # Don't forget, we are still looking at the 'tasks' network
tasks <- tasks %>% mutate(low_constraint = node_is_min(node_constraint(tasks)))
# Now, let's graph the network
 # Note 1: we are looking at the 'tasks' network
 # Note 2: we are interested in the actors 'least constrained' by their position
@@ -257,33 +274,49 @@ Structural Holes and Constraint

autographr(tasks, node_color = "low_constraint")
# Your code should look like this:
 
 tasks <- tasks %>% mutate(low_constraint = node_is_min(node_constraint(tasks)))
 autographr(tasks, node_color = "low_constraint")
-Why minimum? And what can we learn from this plot about where innovation might occur within this network?
+Why minimum? Because constraint measures how well connected each node’s partners are, with the implication that having few partners that are already connected to each other puts a node in an advantageous position to identify and share novel solutions to problems. So what can we learn from this plot about where innovation might occur within this network?

Structural Equivalence

-Now we are going to identify and interpret the roles or relations between a set of structurally equivalent positions. We’re going to identify structurally equivalent positions across all the data that we have, including ‘task’, ‘social’, and ‘friend’ ties.
+Now we are going to identify and interpret the roles or relations between a set of structurally equivalent positions. We want to know who plays what role in this algebra class. We’re going to identify structurally equivalent positions across all the data that we have, including ‘task’, ‘social’, and ‘friend’ ties.
Finding structurally equivalent classes

In {migraph}, finding how the nodes of a network can be partitioned into structurally equivalent classes is as easy as:

node_structural_equivalence(ison_algebra)
 
 ison_algebra %>% 
@@ -306,12 +339,12 @@ Step one: starting with a census

relationships to tie partners.

# Let's use the node_tie_census() function
 # The function accepts an object such as a dataset
 # Hint: Which dataset are we using in this tutorial?
@@ -320,19 +353,20 @@ Step one: starting with a census

node_tie_census(ison_algebra)
# Now, let's get the dimensions of an object via the dim() function
 
 dim(node_tie_census(ison_algebra))
# Your code should look like this:
 
 node_tie_census(ison_algebra)
@@ -345,15 +379,24 @@ Step one: starting with a census

do if you wanted it to be binary?

-# THIS IS A SUGGESTION
-# PERHAPS WE CAN MAKE THIS ANOTHER EXERCISE OR MCQ
node_tie_census(ison_algebra)
+
+# we could convert the result using as.matrix, returning the ties 
+as.matrix((node_tie_census(ison_algebra)>0)+0)
 
+# we could simplify the network by removing the classification into different types of ties
+# note that this also reduces the total number of possible paths between nodes
+ison_algebra %>%
+activate(edges) %>%
+dplyr::select(-c(friends, social, tasks)) %>%
+node_tie_census()

Note that node_tie_census() does not need to be passed to node_structural_equivalence() — this is done

@@ -383,17 +426,19 @@ Step two: growing a tree of similarity

node_structural_equivalence():
plot(node_structural_equivalence(ison_algebra, cluster = "hier", distance = "euclidean"))
 
+# changing the type of distance
 plot(node_structural_equivalence(ison_algebra, cluster = "hier", distance = "manhattan"))
 
-plot(node_structural_equivalence(ison_algebra, cluster = "concor"))
+# changing the clustering algorithm
+plot(node_structural_equivalence(to_named(ison_karateka), cluster = "concor"))
@@ -440,26 +485,26 @@ Step three: identifying the number of clusters

integer.

# Let's use the node_structural_equivalence() function and set 'k' to 2
 
 node_structural_equivalence(____, k = ____)
# Don't forget to plot the dendrogram using the plot() function
 
 node_structural_equivalence(ison_algebra, k = 2)
plot(node_structural_equivalence(ison_algebra, k = 2))

But we’re really just guessing. Maybe 2 is not the best

@@ -494,33 +539,34 @@ Step three: identifying the number of clusters

return a somewhat different result to the elbow method. See what we have here, with all other arguments held the same:
# Let's recall the node_structural_equivalence() function we used in the previous example
 
 plot(node_structural_equivalence(____, k = "____"))
# Now, instead of looking at 2 clusters, let's look at 'elbow'
 
 plot(node_structural_equivalence(ison_algebra, k = "elbow"))
# Now, let's look at 'silhouette'
 
 plot(node_structural_equivalence(ison_algebra, k = "____"))
plot(node_structural_equivalence(ison_algebra, k = "elbow"))
 plot(node_structural_equivalence(ison_algebra, k = "silhouette"))
@@ -544,19 +590,20 @@ Step three: identifying the number of clusters

this misses the point in finding clusters of nodes that, despite some variation, can be considered as similar on some dimension.
# Again, let's use the same function as those used above and change 'k'
 
 plot(node_structural_equivalence(ison_algebra, k = "____"))
plot(node_structural_equivalence(ison_algebra, k = "strict"))

Here for example, no two nodes have precisely the same tie-profile,

@@ -576,7 +623,8 @@ Summarising profiles

structurally equivalent classes. We can graph this of course, as above:
str_clu <- node_structural_equivalence(ison_algebra)
 
 ison_algebra %>% 
@@ -594,19 +642,20 @@ Summarising profiles

(values), but this can be tweaked by assigning some other summary statistic as FUN =.

#Let's bring the node_tie_census() function from Step 1
 
 node_tie_census(ison_algebra)
# Now, let's put it inside the summary() function
 # For a description of summary(), use ?summary()
 
@@ -614,7 +663,7 @@ Summarising profiles

# Great, but we are interested in looking at the structural membership of clusters
 # Let's call the object we created a few steps ago: str_clu
 
@@ -623,7 +672,7 @@ Summarising profiles

summary(node_tie_census(ison_algebra),
         membership = str_clu)
@@ -637,12 +686,13 @@ Summarising profiles

(without the membership vector, just the adjacency/incidence matrix is plotted):
# Let's plot the blockmodel using the plot() function we used for the dendrograms
 # Instead of node_tie_census(), let's use as_matrix()
 
@@ -651,7 +701,7 @@ Summarising profiles

plot(as_matrix(ison_algebra),
      membership = str_clu)
@@ -687,7 +737,7 @@ Reduced graph

becoming loops and thus the network will be complex.

str_clu <- node_structural_equivalence(ison_algebra)
 (bm <- to_blocks(ison_algebra, str_clu))
 
@@ -705,7 +755,7 @@ Unit Test
@@ -714,7 +764,7 @@ Unit Test
@@ -723,7 +773,7 @@ Unit Test
@@ -768,17 +818,20 @@ Unit Test

"library(manynet)", "library(migraph)", "library(patchwork)", "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = NULL, chunks = list(list(label = "data", - code = "", opts = list(label = "\"data\"", exercise = "TRUE"), - engine = "r")), code_check = NULL, error_check = NULL, check = NULL, - solution = structure("data(\"ison_algebra\", package = \"manynet\")", chunk_opts = list( + code = "", opts = list(label = "\"data\"", exercise = "TRUE", + purl = "FALSE"), engine = "r")), code_check = NULL, error_check = NULL, + check = NULL, solution = structure(c("data(\"ison_algebra\", package = \"manynet\")", + "?manynet::ison_algebra", "# If you want to see the network object, you can run the name of the object", + "ison_algebra", "# or print the code with brackets at the front and end of the code", + "(ison_algebra <- manynet::ison_algebra)"), chunk_opts = list( label = "data-solution")), tests = NULL, options = list( eval = FALSE, echo = TRUE, results = "markup", tidy = FALSE, tidy.opts = NULL, collapse = FALSE, prompt = FALSE, comment = NA, highlight = FALSE, size = "normalsize", background = "#F7F7F7", - strip.white = TRUE, cache = 0, cache.path = "equivalence_cache/html/", + strip.white = TRUE, cache = 0, cache.path = "position_cache/html/", cache.vars = NULL, cache.lazy = TRUE, dependson = NULL, autodep = FALSE, cache.rebuild = FALSE, fig.keep = "high", - fig.show = "asis", fig.align = "default", fig.path = "equivalence_files/figure-html/", + fig.show = "asis", fig.align = "default", fig.path = "position_files/figure-html/", dev = "png", dev.args = NULL, dpi = 192, fig.ext = "png", fig.width = 6.5, fig.height = 4, fig.env = "figure", fig.cap = NULL, fig.scap = NULL, fig.lp = "fig:", fig.subcap = NULL, @@ -786,12 +839,13 @@

Unit Test

fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "data", exercise = TRUE, code = "", context = "data", out.width.px = 624, out.height.px = 384, - params.src = "data, exercise = TRUE", fig.num = 0L, exercise.df_print = "paged", - exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", -"tutorial_exercise"))) + params.src = "data, exercise = TRUE, purl = FALSE", fig.num = 0L, + exercise.df_print = "paged", exercise.checker = "NULL"), + engine = "r", version = "4"), class = c("r", "tutorial_exercise" +))) + + + - - + - + + - - + - + + - - + - + + - - + - + + - - + - + + - - + - + + - - + - + + - - + - + + - - + - + + - - + - + + - - + - + + - - + - + + - - + - + + - - + - + +
@@ -1567,7 +1658,7 @@ Unit Test

-Equivalence
+Position

by James Hollway
diff --git a/inst/tutorials/tutorial6/topology.Rmd b/inst/tutorials/tutorial6/topology.Rmd
index c372b842..38cb00ec 100644
--- a/inst/tutorials/tutorial6/topology.Rmd
+++ b/inst/tutorials/tutorial6/topology.Rmd
@@ -46,7 +46,7 @@ give them an informative title, and plot the graphs together.
 What would a complete network with half the nodes look like?
 Add that too.
 
-```{r empty, exercise=TRUE}
+```{r empty, exercise=TRUE, purl = FALSE}
 
 ```
@@ -71,7 +71,7 @@ Use the `create_star()` function to graph three star networks:
 - an out-directed star network
 - and an in-directed star network
 
-```{r star, exercise = TRUE}
+```{r star, exercise = TRUE, purl = FALSE}
 
 ```
@@ -91,7 +91,7 @@ Again graph three networks:
 - a directed network with 2 branches per node
 - the same as above, but graphed using the "tree" layout
 
-```{r tree, exercise = TRUE}
+```{r tree, exercise = TRUE, purl = FALSE}
 
 ```
@@ -131,7 +131,7 @@ two-dimensional representation with a given number of nodes.
 Graph two lattices,
 one with 50 nodes, and another with half the number of nodes.
 
-```{r lattices, exercise = TRUE}
+```{r lattices, exercise = TRUE, purl = FALSE}
 
 ```
@@ -151,7 +151,7 @@ Graph three ring networks:
 on a "circle" layout
 - the same as above, but on a "stress" layout
 
-```{r rings, exercise = TRUE}
+```{r rings, exercise = TRUE, purl = FALSE}
 
 ```
@@ -178,7 +178,7 @@ which connects each pair of nodes ${i,j}$ with probability $p$, independent.
 Note that for "sparse" ER graphs, $p$ must decrease as $N$ goes up.
 Generate three random networks of 50 nodes and a density of 0.08:
 
-```{r random, exercise = TRUE}
+```{r random, exercise = TRUE, purl = FALSE}
 
 ```
@@ -193,7 +193,7 @@ Note that you can also pass the second argument an integer,
 in which case the function will interpret that as the number of ties/edges
 rather than the probability that a tie is present.
 Try generating a random graph with 200 edges/ties now:
 
-```{r randomno, exercise = TRUE}
+```{r randomno, exercise = TRUE, purl = FALSE}
 
 ```
@@ -208,7 +208,7 @@ What if we rewire (change) some of the edges at a certain probability?
 This is how small-world networks are generated.
 Graph three small-world networks,
 all with 50 nodes and a rewiring probability of 0.025.
 
-```{r smallw, exercise = TRUE}
+```{r smallw, exercise = TRUE, purl = FALSE}
 
 ```
@@ -234,7 +234,7 @@ where 1 is as close to a small-world as possible.
 Try it now on a small-world generated network,
 but with a rewiring probability of 0.25:
 
-```{r smallwtest, exercise = TRUE}
+```{r smallwtest, exercise = TRUE, purl = FALSE}
 
 ```
@@ -263,7 +263,7 @@ connect to already well-connected nodes rather than poorly connected ones.
 Generate and graph three scale-free networks,
 with alpha parameters of 0.5, 1, and 1.5.
 
-```{r scalef, exercise = TRUE}
+```{r scalef, exercise = TRUE, purl = FALSE}
 
 ```
@@ -285,7 +285,7 @@ With an alpha/power-law exponent between 2 and 3,
 one generally cannot reject the hypothesis that the observed data
 comes from a power-law distribution.
 
-```{r scaleftest, exercise = TRUE}
+```{r scaleftest, exercise = TRUE, purl = FALSE}
 
 ```
@@ -310,7 +310,7 @@ Graph a core-periphery network of
 50 nodes (which, unless a core-periphery membership assignment is given,
 will be split evenly between core and periphery partitions).
 
-```{r core, exercise=TRUE}
+```{r core, exercise=TRUE, purl = FALSE}
 
 ```
@@ -324,7 +324,7 @@ Let's use the `ison_brandes` dataset from `{manynet}`,
 and identify the core/periphery assignment of the _nodes_.
 Graph the data with the core/periphery assignment.
-```{r nodecore, exercise=TRUE}
+```{r nodecore, exercise=TRUE, purl = FALSE}
 
 ```
@@ -340,7 +340,7 @@ In `{migraph}`, we can return nodes' _k_-coreness with `node_coreness()`
 instead of the `node_core()` used for core-periphery.
 
-```{r nodecoren, exercise=TRUE}
+```{r nodecoren, exercise=TRUE, purl = FALSE}
 
 ```
diff --git a/inst/tutorials/tutorial6/topology.html b/inst/tutorials/tutorial6/topology.html
index 3a19b706..a7db4615 100644
--- a/inst/tutorials/tutorial6/topology.html
+++ b/inst/tutorials/tutorial6/topology.html
@@ -13,7 +13,7 @@

Topology

@@ -140,12 +140,13 @@

Deterministic graphs

give them an informative title, and plot the graphs together. What would a complete network with half the nodes look like? Add that too.

(autographr(create_empty(50), "circle") + ggtitle("Empty graph"))
 (autographr(to_undirected(create_filled(50))) + ggtitle("Complete graph"))
 (autographr(to_undirected(create_filled(50/2))) + ggtitle("Complete graph (smaller)"))
@@ -165,12 +166,13 @@ Stars

  • and an in-directed star network
    (autographr(create_star(50)) + ggtitle("Star graph"))
     (autographr(create_star(50, directed = TRUE)) + ggtitle("Star out"))
     (autographr(to_redirected(create_star(50, directed = TRUE))) + ggtitle("Star in"))
@@ -187,12 +189,13 @@ Trees

  • the same as above, but graphed using the “tree” layout
    (autographr(create_tree(50, width = 2)) + ggtitle("Tree graph"))
     (autographr(create_tree(50, width = 2, directed = TRUE)) + ggtitle("Tree out"))
     (autographr(create_tree(50, width = 2, directed = TRUE), "tree") + ggtitle("Tree layout"))
@@ -223,12 +226,13 @@ Lattices

Graph two lattices, one with 50 nodes, and another with half the number of nodes.
    (autographr(create_lattice(50)) + ggtitle("One-mode lattice graph"))
     (autographr(create_lattice(50/2)) + ggtitle("Smaller lattice graph"))
@@ -244,12 +248,13 @@ Rings

  • the same as above, but on a “stress” layout
    (autographr(create_ring(50)) + ggtitle("Ring graph", subtitle = "Starring Naomi Watts"))
     (autographr(create_ring(50, width = 2), "circle") + ggtitle("The Ring Two", subtitle = "No different?"))
     (autographr(create_ring(50, width = 2), "stress") + ggtitle("The Ring Two v2.0"))
@@ -275,12 +280,13 @@ Random graphs

must decrease as \(N\) goes up. Generate three random networks of 50 nodes and a density of 0.08:
    (autographr(generate_random(50, 0.08)) + ggtitle("Random 1 graph"))
     (autographr(generate_random(50, 0.08)) + ggtitle("Random 2 graph"))
     (autographr(generate_random(50, 0.08)) + ggtitle("Random 3 graph"))
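As a quick sanity check on these parameters: in a $G(N, p)$ random graph the expected number of ties is $\binom{N}{2} p$, so with $N = 50$ and $p = 0.08$ we expect about $\binom{50}{2} \times 0.08 = 1225 \times 0.08 = 98$ ties, with each generated network varying around that figure.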
@@ -291,12 +297,13 @@ Random graphs

than the probability that a tie is present. Try generating a random graph with 200 edges/ties now:
    (erdren4 <- autographr(generate_random(50, 200)) + ggtitle("Random 1 graph"))
@@ -307,12 +314,13 @@ Small-world graphs

are generated. Graph three small-world networks, all with 50 nodes and a rewiring probability of 0.025.
    (autographr(generate_smallworld(50, 0.025)) + ggtitle("Smallworld 1 graph"))
     (autographr(generate_smallworld(50, 0.025)) + ggtitle("Smallworld 2 graph"))
     (autographr(generate_smallworld(50, 0.025)) + ggtitle("Smallworld 3 graph"))
@@ -332,12 +340,12 @@ Small-world graphs

generated network, but with a rewiring probability of 0.25:
    network_smallworld(generate_smallworld(50, 0.25))
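The tutorial's description above (1 is as close to a small-world as possible) suggests a normalised index; a common underlying quantity (stated as background, since `network_smallworld()`'s exact formula is not quoted here) compares observed clustering $C$ and average path length $L$ to their random-graph counterparts:

$$\sigma = \frac{C / C_{\text{rand}}}{L / L_{\text{rand}}},$$

with high clustering but near-random path lengths signalling a small world.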
@@ -360,12 +368,13 @@ Scale-free graphs

Generate and graph three scale-free networks, with alpha parameters of 0.5, 1, and 1.5.
    (autographr(generate_scalefree(50, 0.5)) +
         ggtitle("Scalefree 1 graph", subtitle = "Power = .5"))
     (autographr(generate_scalefree(50, 1)) +
@@ -381,12 +390,12 @@ Scale-free graphs

observed data comes from a power-law distribution.
    network_scalefree(generate_scalefree(50, 2))
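The alpha being examined here is the exponent of a power-law degree distribution,

$$P(k) \propto k^{-\alpha},$$

and empirically observed 'scale-free' networks typically fall in the range $2 < \alpha < 3$, which is why that interval is singled out above.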
@@ -403,12 +412,13 @@ Core-periphery graphs

50 nodes (which, unless a core-periphery membership assignment is given, will be split evenly between core and periphery partitions).
    (autographr(create_core(50)) + ggtitle("Core"))

Let’s consider identifying the core and peripheral nodes in a

@@ -417,12 +427,13 @@ Core-periphery graphs

the nodes. Graph the data with the core/periphery assignment.
    ison_brandes %>% 
       mutate(nc = node_core(ison_brandes)) %>% 
       autographr(node_color = "nc")
@@ -433,12 +444,12 @@ Core-periphery graphs

node_core() used for core-periphery.
    ison_brandes %>% 
       mutate(ncn = node_coreness(ison_brandes)) %>% 
       autographr(node_color = "ncn")
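To help interpret this: a _k_-core is the maximal subgraph in which every member has at least _k_ ties to other members, and a node's coreness is the largest _k_ for which it remains in the _k_-core. A hedged cross-check via `{igraph}` (assuming the `as_igraph()` coercion applies to this object):

```r
library(manynet)
library(igraph)

coreness(as_igraph(ison_brandes))  # should align with node_coreness() above
```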

    Core-periphery graphs

    "library(manynet)", "library(migraph)", "library(patchwork)", "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = NULL, chunks = list(list(label = "lattices", - code = "", opts = list(label = "\"lattices\"", exercise = "TRUE"), - engine = "r")), code_check = NULL, error_check = NULL, check = NULL, - solution = structure(c("(autographr(create_lattice(50)) + ggtitle(\"One-mode lattice graph\"))", + code = "", opts = list(label = "\"lattices\"", exercise = "TRUE", + purl = "FALSE"), engine = "r")), code_check = NULL, error_check = NULL, + check = NULL, solution = structure(c("(autographr(create_lattice(50)) + ggtitle(\"One-mode lattice graph\"))", "(autographr(create_lattice(50/2)) + ggtitle(\"Smaller lattice graph\"))" ), chunk_opts = list(label = "lattices-solution")), tests = NULL, options = list(eval = FALSE, echo = TRUE, results = "markup", @@ -631,9 +642,9 @@

    Core-periphery graphs

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "lattices", exercise = TRUE, - code = "", out.width.px = 624, out.height.px = 384, params.src = "lattices, exercise = TRUE", + code = "", out.width.px = 624, out.height.px = 384, params.src = "lattices, exercise = TRUE, purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -652,9 +663,9 @@

    Core-periphery graphs

    "library(manynet)", "library(migraph)", "library(patchwork)", "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = NULL, chunks = list(list(label = "rings", - code = "", opts = list(label = "\"rings\"", exercise = "TRUE"), - engine = "r")), code_check = NULL, error_check = NULL, check = NULL, - solution = structure(c("(autographr(create_ring(50)) + ggtitle(\"Ring graph\", subtitle = \"Starring Naomi Watts\"))", + code = "", opts = list(label = "\"rings\"", exercise = "TRUE", + purl = "FALSE"), engine = "r")), code_check = NULL, error_check = NULL, + check = NULL, solution = structure(c("(autographr(create_ring(50)) + ggtitle(\"Ring graph\", subtitle = \"Starring Naomi Watts\"))", "(autographr(create_ring(50, width = 2), \"circle\") + ggtitle(\"The Ring Two\", subtitle = \"No different?\"))", "(autographr(create_ring(50, width = 2), \"stress\") + ggtitle(\"The Ring Two v2.0\"))" ), chunk_opts = list(label = "rings-solution")), tests = NULL, @@ -673,9 +684,9 @@

    Core-periphery graphs

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "rings", exercise = TRUE, code = "", - out.width.px = 624, out.height.px = 384, params.src = "rings, exercise = TRUE", + out.width.px = 624, out.height.px = 384, params.src = "rings, exercise = TRUE, purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -694,9 +705,9 @@

    Core-periphery graphs

    "library(manynet)", "library(migraph)", "library(patchwork)", "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = NULL, chunks = list(list(label = "random", - code = "", opts = list(label = "\"random\"", exercise = "TRUE"), - engine = "r")), code_check = NULL, error_check = NULL, check = NULL, - solution = structure(c("(autographr(generate_random(50, 0.08)) + ggtitle(\"Random 1 graph\"))", + code = "", opts = list(label = "\"random\"", exercise = "TRUE", + purl = "FALSE"), engine = "r")), code_check = NULL, error_check = NULL, + check = NULL, solution = structure(c("(autographr(generate_random(50, 0.08)) + ggtitle(\"Random 1 graph\"))", "(autographr(generate_random(50, 0.08)) + ggtitle(\"Random 2 graph\"))", "(autographr(generate_random(50, 0.08)) + ggtitle(\"Random 3 graph\"))" ), chunk_opts = list(label = "random-solution")), tests = NULL, @@ -715,9 +726,9 @@

    Core-periphery graphs

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "random", exercise = TRUE, - code = "", out.width.px = 624, out.height.px = 384, params.src = "random, exercise = TRUE", + code = "", out.width.px = 624, out.height.px = 384, params.src = "random, exercise = TRUE, purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -736,9 +747,9 @@

    Core-periphery graphs

    "library(manynet)", "library(migraph)", "library(patchwork)", "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = NULL, chunks = list(list(label = "randomno", - code = "", opts = list(label = "\"randomno\"", exercise = "TRUE"), - engine = "r")), code_check = NULL, error_check = NULL, check = NULL, - solution = structure("(erdren4 <- autographr(generate_random(50, 200)) + ggtitle(\"Random 1 graph\"))", chunk_opts = list( + code = "", opts = list(label = "\"randomno\"", exercise = "TRUE", + purl = "FALSE"), engine = "r")), code_check = NULL, error_check = NULL, + check = NULL, solution = structure("(erdren4 <- autographr(generate_random(50, 200)) + ggtitle(\"Random 1 graph\"))", chunk_opts = list( label = "randomno-solution")), tests = NULL, options = list( eval = FALSE, echo = TRUE, results = "markup", tidy = FALSE, tidy.opts = NULL, collapse = FALSE, prompt = FALSE, comment = NA, @@ -754,9 +765,9 @@

    Core-periphery graphs

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "randomno", exercise = TRUE, - code = "", out.width.px = 624, out.height.px = 384, params.src = "randomno, exercise = TRUE", + code = "", out.width.px = 624, out.height.px = 384, params.src = "randomno, exercise = TRUE, purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -775,9 +786,9 @@

    Core-periphery graphs

    "library(manynet)", "library(migraph)", "library(patchwork)", "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = NULL, chunks = list(list(label = "smallw", - code = "", opts = list(label = "\"smallw\"", exercise = "TRUE"), - engine = "r")), code_check = NULL, error_check = NULL, check = NULL, - solution = structure(c("(autographr(generate_smallworld(50, 0.025)) + ggtitle(\"Smallworld 1 graph\"))", + code = "", opts = list(label = "\"smallw\"", exercise = "TRUE", + purl = "FALSE"), engine = "r")), code_check = NULL, error_check = NULL, + check = NULL, solution = structure(c("(autographr(generate_smallworld(50, 0.025)) + ggtitle(\"Smallworld 1 graph\"))", "(autographr(generate_smallworld(50, 0.025)) + ggtitle(\"Smallworld 2 graph\"))", "(autographr(generate_smallworld(50, 0.025)) + ggtitle(\"Smallworld 3 graph\"))" ), chunk_opts = list(label = "smallw-solution")), tests = NULL, @@ -796,9 +807,9 @@

    Core-periphery graphs

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "smallw", exercise = TRUE, - code = "", out.width.px = 624, out.height.px = 384, params.src = "smallw, exercise = TRUE", + code = "", out.width.px = 624, out.height.px = 384, params.src = "smallw, exercise = TRUE, purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -817,9 +828,9 @@

    Core-periphery graphs

    "library(manynet)", "library(migraph)", "library(patchwork)", "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = NULL, chunks = list(list(label = "smallwtest", - code = "", opts = list(label = "\"smallwtest\"", exercise = "TRUE"), - engine = "r")), code_check = NULL, error_check = NULL, check = NULL, - solution = structure("network_smallworld(generate_smallworld(50, 0.25))", chunk_opts = list( + code = "", opts = list(label = "\"smallwtest\"", exercise = "TRUE", + purl = "FALSE"), engine = "r")), code_check = NULL, error_check = NULL, + check = NULL, solution = structure("network_smallworld(generate_smallworld(50, 0.25))", chunk_opts = list( label = "smallwtest-solution")), tests = NULL, options = list( eval = FALSE, echo = TRUE, results = "markup", tidy = FALSE, tidy.opts = NULL, collapse = FALSE, prompt = FALSE, comment = NA, @@ -835,9 +846,9 @@

    Core-periphery graphs

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "smallwtest", exercise = TRUE, - code = "", out.width.px = 624, out.height.px = 384, params.src = "smallwtest, exercise = TRUE", + code = "", out.width.px = 624, out.height.px = 384, params.src = "smallwtest, exercise = TRUE, purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -856,9 +867,9 @@

    Core-periphery graphs

    "library(manynet)", "library(migraph)", "library(patchwork)", "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = NULL, chunks = list(list(label = "scalef", - code = "", opts = list(label = "\"scalef\"", exercise = "TRUE"), - engine = "r")), code_check = NULL, error_check = NULL, check = NULL, - solution = structure(c("(autographr(generate_scalefree(50, 0.5)) +", + code = "", opts = list(label = "\"scalef\"", exercise = "TRUE", + purl = "FALSE"), engine = "r")), code_check = NULL, error_check = NULL, + check = NULL, solution = structure(c("(autographr(generate_scalefree(50, 0.5)) +", " ggtitle(\"Scalefree 1 graph\", subtitle = \"Power = .5\"))", "(autographr(generate_scalefree(50, 1)) +", " ggtitle(\"Scalefree 2 graph\", subtitle = \"Power = 1\"))", "(autographr(generate_scalefree(50, 1.5)) +", " ggtitle(\"Scalefree 3 graph\", subtitle = \"Power = 1.5\"))" @@ -878,9 +889,9 @@

    Core-periphery graphs

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "scalef", exercise = TRUE, - code = "", out.width.px = 624, out.height.px = 384, params.src = "scalef, exercise = TRUE", + code = "", out.width.px = 624, out.height.px = 384, params.src = "scalef, exercise = TRUE, purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -899,9 +910,9 @@

    Core-periphery graphs

    "library(manynet)", "library(migraph)", "library(patchwork)", "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = NULL, chunks = list(list(label = "scaleftest", - code = "", opts = list(label = "\"scaleftest\"", exercise = "TRUE"), - engine = "r")), code_check = NULL, error_check = NULL, check = NULL, - solution = structure("network_scalefree(generate_scalefree(50, 2))", chunk_opts = list( + code = "", opts = list(label = "\"scaleftest\"", exercise = "TRUE", + purl = "FALSE"), engine = "r")), code_check = NULL, error_check = NULL, + check = NULL, solution = structure("network_scalefree(generate_scalefree(50, 2))", chunk_opts = list( label = "scaleftest-solution")), tests = NULL, options = list( eval = FALSE, echo = TRUE, results = "markup", tidy = FALSE, tidy.opts = NULL, collapse = FALSE, prompt = FALSE, comment = NA, @@ -917,9 +928,9 @@

    Core-periphery graphs

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "scaleftest", exercise = TRUE, - code = "", out.width.px = 624, out.height.px = 384, params.src = "scaleftest, exercise = TRUE", + code = "", out.width.px = 624, out.height.px = 384, params.src = "scaleftest, exercise = TRUE, purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -938,9 +949,9 @@

    Core-periphery graphs

    "library(manynet)", "library(migraph)", "library(patchwork)", "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = NULL, chunks = list(list(label = "core", - code = "", opts = list(label = "\"core\"", exercise = "TRUE"), - engine = "r")), code_check = NULL, error_check = NULL, check = NULL, - solution = structure("(autographr(create_core(50)) + ggtitle(\"Core\"))", chunk_opts = list( + code = "", opts = list(label = "\"core\"", exercise = "TRUE", + purl = "FALSE"), engine = "r")), code_check = NULL, error_check = NULL, + check = NULL, solution = structure("(autographr(create_core(50)) + ggtitle(\"Core\"))", chunk_opts = list( label = "core-solution")), tests = NULL, options = list( eval = FALSE, echo = TRUE, results = "markup", tidy = FALSE, tidy.opts = NULL, collapse = FALSE, prompt = FALSE, comment = NA, @@ -956,9 +967,9 @@

    Core-periphery graphs

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "core", exercise = TRUE, code = "", - out.width.px = 624, out.height.px = 384, params.src = "core, exercise=TRUE", + out.width.px = 624, out.height.px = 384, params.src = "core, exercise=TRUE, purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -977,11 +988,12 @@

    Core-periphery graphs

    "library(manynet)", "library(migraph)", "library(patchwork)", "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = NULL, chunks = list(list(label = "nodecore", - code = "", opts = list(label = "\"nodecore\"", exercise = "TRUE"), - engine = "r")), code_check = NULL, error_check = NULL, check = NULL, - solution = structure(c("ison_brandes %>% ", " mutate(nc = node_core(ison_brandes)) %>% ", - " autographr(node_color = \"nc\")"), chunk_opts = list(label = "nodecore-solution")), - tests = NULL, options = list(eval = FALSE, echo = TRUE, results = "markup", + code = "", opts = list(label = "\"nodecore\"", exercise = "TRUE", + purl = "FALSE"), engine = "r")), code_check = NULL, error_check = NULL, + check = NULL, solution = structure(c("ison_brandes %>% ", + " mutate(nc = node_core(ison_brandes)) %>% ", " autographr(node_color = \"nc\")" + ), chunk_opts = list(label = "nodecore-solution")), tests = NULL, + options = list(eval = FALSE, echo = TRUE, results = "markup", tidy = FALSE, tidy.opts = NULL, collapse = FALSE, prompt = FALSE, comment = NA, highlight = FALSE, size = "normalsize", background = "#F7F7F7", strip.white = TRUE, cache = 0, @@ -996,9 +1008,9 @@

    Core-periphery graphs

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "nodecore", exercise = TRUE, - code = "", out.width.px = 624, out.height.px = 384, params.src = "nodecore, exercise=TRUE", + code = "", out.width.px = 624, out.height.px = 384, params.src = "nodecore, exercise=TRUE, purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -1017,18 +1029,19 @@

    Core-periphery graphs

    "library(manynet)", "library(migraph)", "library(patchwork)", "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = NULL, chunks = list(list(label = "nodecoren", - code = "", opts = list(label = "\"nodecoren\"", exercise = "TRUE"), - engine = "r")), code_check = NULL, error_check = NULL, check = NULL, - solution = structure(c("ison_brandes %>% ", " mutate(ncn = node_coreness(ison_brandes)) %>% ", - " autographr(node_color = \"ncn\")"), chunk_opts = list( - label = "nodecoren-solution")), tests = NULL, options = list( - eval = FALSE, echo = TRUE, results = "markup", tidy = FALSE, - tidy.opts = NULL, collapse = FALSE, prompt = FALSE, comment = NA, - highlight = FALSE, size = "normalsize", background = "#F7F7F7", - strip.white = TRUE, cache = 0, cache.path = "topology_cache/html/", - cache.vars = NULL, cache.lazy = TRUE, dependson = NULL, - autodep = FALSE, cache.rebuild = FALSE, fig.keep = "high", - fig.show = "asis", fig.align = "default", fig.path = "topology_files/figure-html/", + code = "", opts = list(label = "\"nodecoren\"", exercise = "TRUE", + purl = "FALSE"), engine = "r")), code_check = NULL, error_check = NULL, + check = NULL, solution = structure(c("ison_brandes %>% ", + " mutate(ncn = node_coreness(ison_brandes)) %>% ", " autographr(node_color = \"ncn\")" + ), chunk_opts = list(label = "nodecoren-solution")), tests = NULL, + options = list(eval = FALSE, echo = TRUE, results = "markup", + tidy = FALSE, tidy.opts = NULL, collapse = FALSE, prompt = FALSE, + comment = NA, highlight = FALSE, size = "normalsize", + background = "#F7F7F7", strip.white = TRUE, cache = 0, + cache.path = "topology_cache/html/", cache.vars = NULL, + cache.lazy = TRUE, dependson = NULL, autodep = FALSE, + cache.rebuild = FALSE, fig.keep = "high", fig.show = "asis", + fig.align = "default", fig.path = "topology_files/figure-html/", dev = "png", dev.args = NULL, dpi = 192, fig.ext = "png", fig.width = 6.5, fig.height = 4, fig.env = "figure", fig.cap = NULL, fig.scap = NULL, fig.lp = "fig:", fig.subcap = NULL, @@ -1036,9 +1049,9 @@

    Core-periphery graphs

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "nodecoren", exercise = TRUE, - code = "", out.width.px = 624, out.height.px = 384, params.src = "nodecoren, exercise=TRUE", + code = "", out.width.px = 624, out.height.px = 384, params.src = "nodecoren, exercise=TRUE, purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -1046,12 +1059,12 @@

    Core-periphery graphs
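Since the rendered solutions are elided above, here is a minimal runnable sketch collecting the calls those topology sections exercise. Every function call is taken verbatim from this diff; it assumes `{manynet}`, `{migraph}`, `{ggplot2}` (for `ggtitle()`), and `{patchwork}` are attached as in the tutorials' setup chunks, and that the tidyverse verbs the tutorials use (`%>%`, `mutate()`) are available through them.

```r
library(manynet)   # create_*() / generate_*() constructors, autographr()
library(migraph)   # network_smallworld(), network_scalefree(), node_core(), node_coreness()
library(ggplot2)   # ggtitle()
library(patchwork) # plot composition

# Small-world: rewire a ring with probability 0.025, then test the
# small-world statistic on a more heavily rewired version
autographr(generate_smallworld(50, 0.025)) + ggtitle("Smallworld 1 graph")
network_smallworld(generate_smallworld(50, 0.25))

# Scale-free: preferential attachment, then test whether the
# degree distribution plausibly follows a power law
autographr(generate_scalefree(50, 1)) + ggtitle("Scalefree 2 graph", subtitle = "Power = 1")
network_scalefree(generate_scalefree(50, 2))

# Core-periphery: an ideal-typical core network, then core membership
# and coreness scores mapped onto the Brandes example data
autographr(create_core(50)) + ggtitle("Core")
ison_brandes %>%
  mutate(nc = node_core(ison_brandes)) %>%
  autographr(node_color = "nc")
ison_brandes %>%
  mutate(ncn = node_coreness(ison_brandes)) %>%
  autographr(node_color = "ncn")
```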

diff --git a/inst/tutorials/tutorial7/diffusion.Rmd b/inst/tutorials/tutorial7/diffusion.Rmd
index ba498820..272d3dc5 100644
--- a/inst/tutorials/tutorial7/diffusion.Rmd
+++ b/inst/tutorials/tutorial7/diffusion.Rmd
@@ -25,18 +25,18 @@
 Let's take the `ison_networkers` dataset from `{manynet}`,
 and create or generate ring, lattice, random, scale-free,
 and small-world versions with the same number of nodes.
 
-```{r create, echo = TRUE, exercise = TRUE}
+```{r create, echo = TRUE, exercise = TRUE, purl = FALSE}
 
 ```
 
-```{r create-hint-1}
+```{r create-hint-1, purl = FALSE}
 # Let's create a new object, "nw", that removes all vertex names
 # Hint: We want to use two functions for reformatting networks, graphs, and matrices
 nw <- ____(____(ison_networkers))
 ```
 
-```{r create-hint-2}
+```{r create-hint-2, purl = FALSE}
 # We also want to remove edge direction, so that any pair of nodes with at least
 # one directed edge will be connected by an undirected edge in the new network.
 
@@ -57,45 +57,45 @@ Now, using the "nw" network from the last section, let's create or generate ring
 + `generate_scalefree()`: Generates a scale-free structure following the preferential attachment model.
 + `generate_smallworld()`: Generates a small-world structure following the lattice rewiring model.
 
-```{r create-nwstructure, echo = TRUE, exercise = TRUE}
+```{r create-nwstructure, echo = TRUE, exercise = TRUE, purl = FALSE}
 
 ```
 
-```{r create-nwstructure-hint-1}
+```{r create-nwstructure-hint-1, purl = FALSE}
 # Let's generate a ring structure, "rg", with a width of 2, using the appropriate
 # function above
 rg <- ____(____, ____)
 ```
 
-```{r create-nwstructure-hint-2}
+```{r create-nwstructure-hint-2, purl = FALSE}
 rg <- create_ring(nw, width = 2)
 ```
 
-```{r create-nwstructure-hint-3}
+```{r create-nwstructure-hint-3, purl = FALSE}
 # Let's generate a lattice structure, "la", using the appropriate function above
 la <- ____(____)
 ```
 
-```{r create-nwstructure-hint-4}
+```{r create-nwstructure-hint-4, purl = FALSE}
 la <- create_lattice(nw)
 ```
 
-```{r create-nwstructure-hint-5}
+```{r create-nwstructure-hint-5, purl = FALSE}
 # Let's generate a random structure, "rd", without attributes
 rd <- ____(____, ____)
 ```
 
-```{r create-nwstructure-hint-6}
+```{r create-nwstructure-hint-6, purl = FALSE}
 rd <- generate_random(nw, with_attr = FALSE)
 ```
 
-```{r create-nwstructure-hint-7}
+```{r create-nwstructure-hint-7, purl = FALSE}
 # The last two will look similar. For the smallworld structure we call the object "sw"
 # and for scalefree, "sf". We will also set the proportion of possible ties to 0.025.
 
@@ -103,13 +103,13 @@ sf <- ____(nw, ____)
 sw <- ____(nw, ____)
 ```
 
-```{r create-nwstructure-hint-8}
+```{r create-nwstructure-hint-8, purl = FALSE}
 sf <- generate_scalefree(nw, 0.025)
 sw <- generate_smallworld(nw, 0.025)
 ```
 
-```{r create-nwstructure-hint-9}
+```{r create-nwstructure-hint-9, purl = FALSE}
 # Finally, let's plot the respective graphs:
 
 autographr(____) + ggtitle("Networkers") +
@@ -144,11 +144,11 @@ that of the ring network.
 To run a basic diffusion model across this network,
 simply pass it to `play_diffusion()` and (save and) plot the result.
 
-```{r ring, exercise = TRUE, exercise.setup = "create"}
+```{r ring, exercise = TRUE, exercise.setup = "create", purl = FALSE}
 
 ```
 
-```{r ring-hint}
+```{r ring-hint, purl = FALSE}
 # Let's call the ring structure from the previous section, "rg", and create a new object
 # "rg1" with a seed of 1. Don't forget to plot it!
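These hints resolve to a handful of one-liners. For reference, a minimal runnable sketch assembled only from calls shown in this diff, assuming `{manynet}`, `{migraph}`, and `{ggplot2}` are attached as in the tutorial's setup chunk:

```r
library(manynet)  # to_*(), create_*(), generate_*(), autographr()
library(migraph)
library(ggplot2)  # ggtitle()

# Strip vertex names and edge direction so every version shares a format
nw <- to_undirected(to_unnamed(ison_networkers))

rg <- create_ring(nw, width = 2)             # ring of the same size, width 2
la <- create_lattice(nw)                     # lattice
rd <- generate_random(nw, with_attr = FALSE) # random, dropping attributes
sf <- generate_scalefree(nw, 0.025)          # scale-free, 2.5% of possible ties
sw <- generate_smallworld(nw, 0.025)         # small-world, 2.5% rewiring probability

autographr(nw) + ggtitle("Networkers")       # and likewise for rg, la, rd, sf, sw
```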
@@ -180,7 +180,7 @@
 To see whether this is true, try choosing the sixteenth (middle) node
 and see whether the result is any different.
 
-```{r ring2, exercise = TRUE, exercise.setup = "create"}
+```{r ring2, exercise = TRUE, exercise.setup = "create", purl = FALSE}
 
 ```
 
@@ -189,7 +189,7 @@
 rg2 <- play_diffusion(rg, seeds = 16)
 plot(rg2)
 ```
 
-```{r ring2-interp, echo = FALSE}
+```{r ring2-interp, echo = FALSE, purl = FALSE}
 question("Do you see any differences?",
          answer("Yes"),
          answer("No", correct = TRUE),
@@ -200,11 +200,11 @@
 Now what if we seed the network with more than one infected node?
 Choosing the first four nodes, we can see that the process is jump-started,
 but doesn't really conclude that much faster.
 
-```{r ring3, exercise = TRUE, exercise.setup = "create"}
+```{r ring3, exercise = TRUE, exercise.setup = "create", purl = FALSE}
 
 ```
 
-```{r ring3-hint}
+```{r ring3-hint, purl = FALSE}
 # Remember we want to see the first four nodes.
 
 plot(play_diffusion(rg, seeds = ____))
@@ -218,11 +218,11 @@
 But what if we seed the network at three different places?
 Here we can use `node_is_random()` to randomly select some nodes to seed.
 Try it with four randomly-selected nodes and see what you get.
 
-```{r ring4, exercise = TRUE, exercise.setup = "create"}
+```{r ring4, exercise = TRUE, exercise.setup = "create", purl = FALSE}
 
 ```
 
-```{r ring4-hint}
+```{r ring4-hint, purl = FALSE}
 # We will be using node_is_random() within the seeds argument to randomly select
 # 4 nodes
 
@@ -233,7 +233,7 @@ plot(play_diffusion(rg, seeds = ____(rg, ____)))
 plot(play_diffusion(rg, seeds = node_is_random(rg, 4)))
 ```
 
-```{r ring4-interp, echo = FALSE}
+```{r ring4-interp, echo = FALSE, purl = FALSE}
 question("Do you see any differences?",
          answer("Yes", correct = TRUE),
          answer("No"),
@@ -251,7 +251,7 @@
 when the network has a different structure.
 Here let's play and plot two diffusions on the lattice network,
 one with the first node as seed and one with the last.
 
-```{r lattice, exercise = TRUE, exercise.setup = "create"}
+```{r lattice, exercise = TRUE, exercise.setup = "create", purl = FALSE}
 
 ```
 
@@ -260,7 +260,7 @@
 plot(play_diffusion(la, seeds = 1))/
 plot(play_diffusion(la, seeds = 16))
 ```
 
-```{r lattice-interp, echo = FALSE}
+```{r lattice-interp, echo = FALSE, purl = FALSE}
 question("Do you see any differences?",
          answer("Yes", correct = TRUE),
          answer("No"),
@@ -277,7 +277,7 @@ Similar to the previous examples, we will be using the following functions withi
 + `node_is_max()`: Returns logical of which nodes hold the maximum of some measure.
 + `node_is_min()`: Returns logical of which nodes hold the minimum of some measure.
 
-```{r scale, exercise = TRUE, exercise.setup = "create"}
+```{r scale, exercise = TRUE, exercise.setup = "create", purl = FALSE}
 
 ```
 
@@ -288,7 +288,7 @@
 plot(play_diffusion(sf, seeds = node_is_max(node_degree(sf)), steps = 10)) /
 plot(play_diffusion(sf, seeds = node_is_min(node_degree(sf)), steps = 10))
 ```
 
-```{r mindeg-interp, echo = FALSE}
+```{r mindeg-interp, echo = FALSE, purl = FALSE}
 question("Which of these four led to the fastest diffusion process?",
          answer("Minimum degree node(s), because there are many more nodes with the minimum degree.", correct = TRUE),
          answer("Node 10 as seed, because it is the most influential."),
@@ -309,7 +309,7 @@
 Show that whereas a threshold of one will result in complete infection,
 a threshold of two will not lead to any diffusion process unless there are
 two seeds and they are in another node's neighbourhood.
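The seed variations in the hunks above reduce to the following sketch, assuming the `rg` and `la` objects from the `create` chunks and `{patchwork}` attached for the `/` composition:

```r
# One seed at the "end" of the ring, then at the middle node
plot(play_diffusion(rg, seeds = 1))
plot(play_diffusion(rg, seeds = 16))

# Several seeds: the first four nodes, then four chosen at random
plot(play_diffusion(rg, seeds = 1:4))
plot(play_diffusion(rg, seeds = node_is_random(rg, 4)))

# The same comparison on the lattice, stacked with {patchwork}
plot(play_diffusion(la, seeds = 1)) /
  plot(play_diffusion(la, seeds = 16))
```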
-```{r complex, exercise = TRUE, exercise.setup = "create"}
+```{r complex, exercise = TRUE, exercise.setup = "create", purl = FALSE}
 
 ```
 
@@ -320,7 +320,7 @@
 plot(play_diffusion(rg, seeds = 1:2, thresholds = 2))/
 plot(play_diffusion(rg, seeds = c(1,16), thresholds = 2))
 ```
 
-```{r complex-interp, echo = FALSE}
+```{r complex-interp, echo = FALSE, purl = FALSE}
 question("For which seed/threshold combinations was there complete infection?",
          answer("seeds = 1 and thresholds = 1", correct = TRUE),
          answer("seeds = 1 and thresholds = 2"),
@@ -335,7 +335,7 @@
 A threshold of 2 would be easy to surpass for particularly well connected nodes,
 but impossible for pendants.
 Let's see what happens when we use this threshold on a scale-free network.
 
-```{r sfcomplex, exercise = TRUE, exercise.setup = "create"}
+```{r sfcomplex, exercise = TRUE, exercise.setup = "create", purl = FALSE}
 
 ```
 
@@ -343,7 +343,7 @@ Let's see what happens when we use this threshold on a scale-free network.
 plot(play_diffusion(sf, seeds = 1, thresholds = 2))
 ```
 
-```{r sfcomplex-interp, echo=FALSE}
+```{r sfcomplex-interp, echo=FALSE, purl = FALSE}
 question("Does it matter how many seeds are used?",
          answer("Yes"),
          answer("No", correct = TRUE),
@@ -356,11 +356,11 @@
 as a proportion of contacts that should be infected before the node
 will become infected.
 Try thresholds of 0.1, 0.25, and 0.5 on two seeds and 10 steps.
 
-```{r sfprop, exercise = TRUE, exercise.setup = "create"}
+```{r sfprop, exercise = TRUE, exercise.setup = "create", purl = FALSE}
 
 ```
 
-```{r sfprop-hint}
+```{r sfprop-hint, purl = FALSE}
 plot(play_diffusion(sf, seeds = 1:2, thresholds = ____, steps = ____))/
 plot(play_diffusion(sf, seeds = 1:2, thresholds = ____, steps = ____))/
 plot(play_diffusion(sf, seeds = 1:2, thresholds = ____, steps = ____))
@@ -372,7 +372,7 @@
 plot(play_diffusion(sf, seeds = 1:2, thresholds = 0.25, steps = 10))/
 plot(play_diffusion(sf, seeds = 1:2, thresholds = 0.5, steps = 10))
 ```
 
-```{r sfprop-interp, echo = FALSE}
+```{r sfprop-interp, echo = FALSE, purl = FALSE}
 question("Does the threshold proportion matter?",
          answer("Yes", correct = TRUE),
          answer("No"),
@@ -389,11 +389,11 @@
 Try two diffusion models, one where the threshold is 0.1 for the first 10 nodes
 and 0.25 for the latter group of 22 nodes, and another diffusion
 where the threshold levels are reversed.
 
-```{r rand, exercise = TRUE, exercise.setup = "create"}
+```{r rand, exercise = TRUE, exercise.setup = "create", purl = FALSE}
 
 ```
 
-```{r rand-hint}
+```{r rand-hint, purl = FALSE}
 plot(play_diffusion(sf, thresholds = c(rep(____,____), rep(____,____))))/
 plot(play_diffusion(sf, thresholds = c(rep(____,____), rep(____,____))))
 ```
@@ -435,11 +435,11 @@
 Try this out with our well-mixed random network,
 10 steps, 5 times, and with a `transmissibility` parameter set to 0.5
 to indicate that contagion is successful in only half of cases.
 
-```{r diffusions, exercise = TRUE, exercise.setup = "create"}
+```{r diffusions, exercise = TRUE, exercise.setup = "create", purl = FALSE}
 
 ```
 
-```{r diffusions-hint}
+```{r diffusions-hint, purl = FALSE}
 # Remember, we are looking at the random network from before, "rd", with
 # a transmissibility parameter of 0.5, 5 times, and 10 steps.
 
@@ -466,11 +466,11 @@
 Let's try a rate of recovery of 0.20, which means that it'll take
 an infected node on average 5 steps (days?) to recover.
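Collecting the threshold and multiple-run variations above into one sketch, again assuming the `rg`, `rd`, and `sf` objects from the `create` chunks:

```r
# Complex contagion: a threshold of 2 stalls from a single seed,
# and two seeds only help if they fall within a common neighbourhood
plot(play_diffusion(rg, seeds = 1, thresholds = 2))
plot(play_diffusion(rg, seeds = 1:2, thresholds = 2)) /
  plot(play_diffusion(rg, seeds = c(1, 16), thresholds = 2))

# Fractional thresholds: the proportion of contacts that must be infected
plot(play_diffusion(sf, seeds = 1:2, thresholds = 0.1, steps = 10)) /
  plot(play_diffusion(sf, seeds = 1:2, thresholds = 0.25, steps = 10)) /
  plot(play_diffusion(sf, seeds = 1:2, thresholds = 0.5, steps = 10))

# Node-specific thresholds: 0.1 for the first 10 nodes, 0.25 for the other 22
plot(play_diffusion(sf, thresholds = c(rep(0.1, 10), rep(0.25, 22))))

# Five stochastic runs of ten steps with imperfect transmission
plot(play_diffusions(rd, transmissibility = 0.5, times = 5, steps = 10))
```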
-```{r sir, exercise = TRUE}
+```{r sir, exercise = TRUE, purl = FALSE}
 
 ```
 
-```{r sir-hint}
+```{r sir-hint, purl = FALSE}
 # Remember, we are still looking at the random network, "rd", with a
 # recovery rate of 20 percent.
 
@@ -492,7 +492,7 @@
 from the contagion doesn't last forever.
 In this kind of model, add an additional waning parameter of 0.05.
 Play a single diffusion so that you can see what's going on in a particular run.
 
-```{r sirs, exercise = TRUE}
+```{r sirs, exercise = TRUE, purl = FALSE}
 
 ```
 
@@ -500,7 +500,7 @@ Play a single diffusion so that you can see what's going on in a particular run.
 plot(play_diffusion(rd, recovery = 0.25, waning = 0.05))
 ```
 
-```{r sirs-interp, echo = FALSE}
+```{r sirs-interp, echo = FALSE, purl = FALSE}
 question("Does the process reach a stable state?",
          answer("Yes", correct = TRUE),
          answer("No"),
@@ -516,7 +516,7 @@
 Again, this should be specified as a proportion (try 0.25, approx four days).
 Play a single diffusion so that you can see what's going on in a particular run.
 
-```{r seir, exercise = TRUE, exercise.setup = "create"}
+```{r seir, exercise = TRUE, exercise.setup = "create", purl = FALSE}
 
 ```
 
@@ -539,11 +539,11 @@ First of all, check whether the network is _connected_ and _aperiodic_ via the f
 + `is_connected()`: Tests whether network is weakly connected if the network is *undirected* or strongly connected if directed.
 + `is_aperiodic()`: Tests whether network is aperiodic.
 
-```{r aperiod, exercise = TRUE}
+```{r aperiod, exercise = TRUE, purl = FALSE}
 
 ```
 
-```{r aperiod-hint}
+```{r aperiod-hint, purl = FALSE}
 # By default is_connected() will check whether a directed network
 # is strongly connected.
 ```
@@ -553,7 +553,7 @@
 is_connected(ison_networkers)
 is_aperiodic(ison_networkers)
 ```
 
-```{r aperiod-interp, echo = FALSE}
+```{r aperiod-interp, echo = FALSE, purl = FALSE}
 question("Based on these results, would you expect this network to converge to a consensus?",
          answer("No"),
          answer("Yes", correct = TRUE),
@@ -575,11 +575,11 @@
 Create the distribution of **beliefs** and graph the network to show
 where they have been distributed.
 Then play the learning model with these beliefs, and plot the result.
 
-```{r degroot, exercise = TRUE}
+```{r degroot, exercise = TRUE, purl = FALSE}
 
 ```
 
-```{r degroot-hint}
+```{r degroot-hint, purl = FALSE}
 beliefs <- rbinom(network_nodes(____), 1, prob = 0.25)
 ____ %>% mutate(____ = beliefs) %>% autographr(node_color = "____")
 netlearn <- play_learning(____, ____)
@@ -601,7 +601,7 @@
 Then we can see how responsive these nodes are to the random distribution
 of beliefs across the network.
 Some revise their beliefs more significantly than others.
 
-```{r degroot-interp, echo = FALSE}
+```{r degroot-interp, echo = FALSE, purl = FALSE}
 question("What are some true statements about these results?",
          answer("Some nodes revise their beliefs more than others.", correct = TRUE),
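The remaining sections reduce to the sketch below, assuming `rd` from the `create` chunks and the packages attached as before; the final `plot(netlearn)` call is an assumption, since the diff asks to "plot the result" without showing the call.

```r
# SIR: recovery rate 0.2, i.e. roughly five steps to recover
plot(play_diffusions(rd, recovery = 0.2))

# SIRS: recovered nodes' immunity wanes, returning them to susceptible
plot(play_diffusion(rd, recovery = 0.25, waning = 0.05))

# SEIR: an exposed-but-not-yet-infectious stage via latency
plot(play_diffusion(rd, latency = 0.25, recovery = 0.25))

# DeGroot learning: convergence to a consensus is expected because
# the network is connected and aperiodic
is_connected(ison_networkers)
is_aperiodic(ison_networkers)

beliefs <- rbinom(network_nodes(ison_networkers), 1, prob = 0.25)
ison_networkers %>%
  mutate(beliefs = beliefs) %>%
  autographr(node_color = "beliefs")
netlearn <- play_learning(ison_networkers, beliefs)
plot(netlearn)  # assumed display method for the learning result
```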

    Investigate diffusion through simulation

    {manynet}, and create or generate ring, lattice, random, scale-free, and small-world versions with the same number of nodes.

    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    +data-lines="0" data-pipe="|>">
    # Let's create a new object, "nw", the removes the names of all vertex names
     # Hint: We want to use two functions used for reformatting networks, graphs, and matrices
     
    @@ -133,7 +134,7 @@ 

    Investigate diffusion through simulation

    +data-lines="0" data-pipe="|>">
    # We also want to remove edge direction, so that any pair of nodes with at least
     # one directed edge will be connected by an undirected edge in the new network.
     
    @@ -141,7 +142,7 @@ 

    Investigate diffusion through simulation

    +data-lines="0" data-pipe="|>">
    nw <- to_undirected(to_unnamed(ison_networkers))
    Creating and visualising different network structures
    +data-lines="0" data-pipe="|>">
    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    # Let's generate a ring structure, "rg", with a width of 2, using the appropriate
     # function above
     
    @@ -178,36 +180,42 @@ 

    Creating and visualising different network structures

    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    rg <- create_ring(nw, width = 2)
    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    # Let's generate a lattice structure, "la", using the appropriate function above
     
     la <- ____(____)
    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    la <- create_lattice(nw)
    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    # Let's generate a random structure, "rd", without attributes
     
     rd <- ____(____, ____)
    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    rd <- generate_random(nw, with_attr = FALSE)
    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    # The last two will look similar. For the smallworld structure we call the object "sw" 
     # and for scalefree, "sf". We will also set the proportion of possible ties to 0.025.
     
    @@ -216,13 +224,15 @@ 

    Creating and visualising different network structures

    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    sf <- generate_scalefree(nw, 0.025)
     sw <- generate_smallworld(nw, 0.025)
    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    # Finally, let's plot the respective graphs:
     
     autographr(____) + ggtitle("Networkers") +
    @@ -234,7 +244,8 @@ 

    Creating and visualising different network structures

    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    # Here is the solution:
     
     rg <- create_ring(nw, width = 2)
    @@ -259,12 +270,13 @@ 

    Examining diffusion across networks of different structure

    across this network, simply pass it to play_diffusion() and (save and) plot the result.

    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    +data-lines="0" data-pipe="|>">
    # Let's call the ring structure from the previous section, "rg", and create a new object
     # "rg1" with a seed of 1. Don't forget to plot it!
     
    @@ -273,7 +285,7 @@ 

    Examining diffusion across networks of different structure

    +data-lines="0" data-pipe="|>">
    rg1 <- play_diffusion(rg, seeds = 1)
     plot(rg1)
    @@ -294,12 +306,13 @@

    Varying seed nodes

    network. To see whether this is true, try choosing the sixteenth (middle) node and see whether the result is any different.

    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    +data-lines="0" data-pipe="|>">
    rg2 <- play_diffusion(rg, seeds = 16)
     plot(rg2)
    @@ -315,31 +328,33 @@

    Varying seed nodes

    Choosing the first four nodes we can see that the process is jump-started, but doesn’t really conclude that much faster.

    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    +data-lines="0" data-pipe="|>">
    # Remember we want to see the first four nodes.
     
     plot(play_diffusion(rg, seeds = ____))
    +data-lines="0" data-pipe="|>">
    plot(play_diffusion(rg, seeds = 1:4))

    But what if we seed the network at three different places? Here we can use node_is_random() to randomly select some nodes to seed. Try it with four randomly-selected nodes and see what you get.

    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    +data-lines="0" data-pipe="|>">
    # We will be using the node_is_random() within the seed argument to random select 
     # 4 nodes
     
    @@ -347,7 +362,7 @@ 

    Varying seed nodes

    +data-lines="0" data-pipe="|>">
    plot(play_diffusion(rg, seeds = node_is_random(rg, 4)))
    @@ -369,12 +384,13 @@

    Varying networks

    diffusion on the lattice network, one with the first node as seed and again one on the last.

    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    +data-lines="0" data-pipe="|>">
    plot(play_diffusion(la, seeds = 1))/
     plot(play_diffusion(la, seeds = 16))
    @@ -400,12 +416,13 @@

    Varying networks

    minimum of some measure.
    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    +data-lines="0" data-pipe="|>">
    plot(play_diffusion(sf, seeds = 10, steps = 10)) / 
     plot(play_diffusion(sf, seeds = node_is_random(sf), steps = 10)) /
     plot(play_diffusion(sf, seeds = node_is_max(node_degree(sf)), steps = 10)) /
    @@ -431,12 +448,13 @@ 

    Varying thresholds

    will not lead to any diffusion process unless there are two seeds and that they are in another nodes neighbourhood.

    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    +data-lines="0" data-pipe="|>">
    plot(play_diffusion(rg, seeds = 1, thresholds = 1))/
     plot(play_diffusion(rg, seeds = 1, thresholds = 2))/
     plot(play_diffusion(rg, seeds = 1:2, thresholds = 2))/
    @@ -457,12 +475,12 @@ 

    Varying thresholds

    scale-free network.

    +data-lines="0" data-pipe="|>">
    +data-lines="0" data-pipe="|>">
    plot(play_diffusion(sf, seeds = 1, thresholds = 2))
    @@ -479,19 +497,20 @@

    Varying thresholds

    become infected. Try thresholds of 0.1, 0.25, and 0.5 on two seeds and 10 steps.

    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    +data-lines="0" data-pipe="|>">
    plot(play_diffusion(sf, seeds = 1:2, thresholds = ____, steps = ____))/
     plot(play_diffusion(sf, seeds = 1:2, thresholds = ____, steps = ____))/
     plot(play_diffusion(sf, seeds = 1:2, thresholds = ____, steps = ____))
    +data-lines="0" data-pipe="|>">
    plot(play_diffusion(sf, seeds = 1:2, thresholds = 0.1, steps = 10))/
     plot(play_diffusion(sf, seeds = 1:2, thresholds = 0.25, steps = 10))/
     plot(play_diffusion(sf, seeds = 1:2, thresholds = 0.5, steps = 10))
    @@ -513,18 +532,19 @@

    Varying thresholds

    0.1 for the first 10 and 0.25 for the latter group of 22 nodes, and another diffusion where the threshold levels are reversed.

    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    +data-lines="0" data-pipe="|>">
    plot(play_diffusion(sf, thresholds = c(rep(____,____), rep(____,____))))/
     plot(play_diffusion(sf, thresholds = c(rep(____,____), rep(____,____))))
    +data-lines="0" data-pipe="|>">
    plot(play_diffusion(sf, thresholds = c(rep(0.1,10), rep(0.25,22))))/
     plot(play_diffusion(sf, thresholds = c(rep(0.25,10), rep(0.1,22))))
    @@ -560,12 +580,12 @@

    Running multiple simulations

    only 1/2 cases is contagion successful.

    +data-lines="0" data-pipe="|>">
    +data-lines="0" data-pipe="|>">
    # Remember, we are looking at the random network from before, "rd", with 
     # a transmissibility parameter of 0.5, 5 times, and 10 steps.
     
    @@ -573,7 +593,7 @@ 

    Running multiple simulations

    +data-lines="0" data-pipe="|>">
    plot(play_diffusions(rd, transmissibility = 0.5, times = 5, steps = 10))

    Note that in this plot the number of new infections is not plotted, @@ -590,12 +610,13 @@

    SIR models

    argument. Let’s try a rate of recovery of 0.20, which means that it’ll take an infected node on average 5 steps (days?) to recover.

    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    +data-lines="0" data-pipe="|>">
    # Remember, we are still looking at the random network, "rd", with a 
     # recovery rate of 20 percent.
     
    @@ -603,7 +624,7 @@ 

    SIR models

    +data-lines="0" data-pipe="|>">
    plot(play_diffusions(rd, recovery = 0.2))

    What we see in these kinds of models is typically a spike in @@ -618,12 +639,13 @@

    SIRS models

    additional waning parameter of 0.05. Play a single diffusion so that you can see what’s going on in a particular run.

    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    +data-lines="0" data-pipe="|>">
    plot(play_diffusion(rd, recovery = 0.25, waning = 0.05))
    @@ -643,12 +665,13 @@

    SEIR models

    proportion (try 0.25, approx four days). Play a single diffusion so that you can see what’s going on in a particular run.

    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    +data-lines="0" data-pipe="|>">
    plot(play_diffusion(rd, latency = 0.25, recovery = 0.25))
    @@ -674,18 +697,19 @@

    Expectations of convergence and consensus

    aperiodic.
    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    +data-lines="0" data-pipe="|>">
    # By default is_connected() will check whether a directed network
     # is strongly connected.
    +data-lines="0" data-pipe="|>">
    is_connected(ison_networkers)
     is_aperiodic(ison_networkers)
    @@ -711,12 +735,13 @@

    Playing the DeGroot learning model

    been distributed. Then play the learning model with these beliefs, and plot the result.

    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    +data-lines="0" data-pipe="|>">
    beliefs <- rbinom(network_nodes(____), 1, prob = 0.25)
     ____ %>% mutate(____ = beliefs) %>% autographr(node_color = "____")
     netlearn <- play_learning(____, ____)
    @@ -724,7 +749,7 @@ 

    Playing the DeGroot learning model

    +data-lines="0" data-pipe="|>">
    beliefs <- rbinom(network_nodes(ison_networkers), 1, prob = 0.25)
     ison_networkers %>% mutate(beliefs = beliefs) %>% autographr(node_color = "beliefs")
     netlearn <- play_learning(ison_networkers, beliefs)
    @@ -786,7 +811,7 @@ 

    Playing the DeGroot learning model

    "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = NULL, chunks = list(list(label = "create", code = "", opts = list(label = "\"create\"", echo = "TRUE", - exercise = "TRUE"), engine = "r")), code_check = NULL, + exercise = "TRUE", purl = "FALSE"), engine = "r")), code_check = NULL, error_check = NULL, check = NULL, solution = structure("nw <- to_undirected(to_unnamed(ison_networkers))", chunk_opts = list( label = "create-solution")), tests = NULL, options = list( eval = FALSE, echo = TRUE, results = "markup", tidy = FALSE, @@ -803,9 +828,9 @@

    Playing the DeGroot learning model

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "create", exercise = TRUE, - code = "", out.width.px = 624, out.height.px = 384, params.src = "create, echo = TRUE, exercise = TRUE", + code = "", out.width.px = 624, out.height.px = 384, params.src = "create, echo = TRUE, exercise = TRUE, purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -825,8 +850,8 @@

    Playing the DeGroot learning model

    "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = NULL, chunks = list(list(label = "create-nwstructure", code = "", opts = list(label = "\"create-nwstructure\"", - echo = "TRUE", exercise = "TRUE"), engine = "r")), code_check = NULL, - error_check = NULL, check = NULL, solution = structure(c("# Here is the solution:", + echo = "TRUE", exercise = "TRUE", purl = "FALSE"), engine = "r")), + code_check = NULL, error_check = NULL, check = NULL, solution = structure(c("# Here is the solution:", "", "rg <- create_ring(nw, width = 2)", "la <- create_lattice(nw)", "rd <- generate_random(nw, with_attr = FALSE)", "sf <- generate_scalefree(nw, 0.025)", "sw <- generate_smallworld(nw, 0.025)", "autographr(nw) + ggtitle(\"Networkers\") +", @@ -849,9 +874,9 @@

    Playing the DeGroot learning model

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "create-nwstructure", exercise = TRUE, - code = "", out.width.px = 624, out.height.px = 384, params.src = "create-nwstructure, echo = TRUE, exercise = TRUE", + code = "", out.width.px = 624, out.height.px = 384, params.src = "create-nwstructure, echo = TRUE, exercise = TRUE, purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -871,10 +896,11 @@

    Playing the DeGroot learning model

    "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = "", chunks = list(list(label = "create", code = "", opts = list(label = "\"create\"", echo = "TRUE", - exercise = "TRUE"), engine = "r"), list(label = "ring", - code = "", opts = list(label = "\"ring\"", exercise = "TRUE", - exercise.setup = "\"create\""), engine = "r")), code_check = NULL, - error_check = NULL, check = NULL, solution = structure(c("rg1 <- play_diffusion(rg, seeds = 1)", + exercise = "TRUE", purl = "FALSE"), engine = "r"), list( + label = "ring", code = "", opts = list(label = "\"ring\"", + exercise = "TRUE", exercise.setup = "\"create\"", purl = "FALSE"), + engine = "r")), code_check = NULL, error_check = NULL, check = NULL, + solution = structure(c("rg1 <- play_diffusion(rg, seeds = 1)", "plot(rg1)"), chunk_opts = list(label = "ring-solution")), tests = NULL, options = list(eval = FALSE, echo = TRUE, results = "markup", tidy = FALSE, tidy.opts = NULL, collapse = FALSE, prompt = FALSE, @@ -891,9 +917,9 @@

    Playing the DeGroot learning model

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "ring", exercise = TRUE, exercise.setup = "create", - code = "", out.width.px = 624, out.height.px = 384, params.src = "ring, exercise = TRUE, exercise.setup = \"create\"", + code = "", out.width.px = 624, out.height.px = 384, params.src = "ring, exercise = TRUE, exercise.setup = \"create\", purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -913,10 +939,11 @@

    Playing the DeGroot learning model

    "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = "", chunks = list(list(label = "create", code = "", opts = list(label = "\"create\"", echo = "TRUE", - exercise = "TRUE"), engine = "r"), list(label = "ring2", - code = "", opts = list(label = "\"ring2\"", exercise = "TRUE", - exercise.setup = "\"create\""), engine = "r")), code_check = NULL, - error_check = NULL, check = NULL, solution = structure(c("rg2 <- play_diffusion(rg, seeds = 16)", + exercise = "TRUE", purl = "FALSE"), engine = "r"), list( + label = "ring2", code = "", opts = list(label = "\"ring2\"", + exercise = "TRUE", exercise.setup = "\"create\"", purl = "FALSE"), + engine = "r")), code_check = NULL, error_check = NULL, check = NULL, + solution = structure(c("rg2 <- play_diffusion(rg, seeds = 16)", "plot(rg2)"), chunk_opts = list(label = "ring2-solution")), tests = NULL, options = list(eval = FALSE, echo = TRUE, results = "markup", tidy = FALSE, tidy.opts = NULL, collapse = FALSE, prompt = FALSE, @@ -933,9 +960,9 @@

    Playing the DeGroot learning model

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "ring2", exercise = TRUE, exercise.setup = "create", - code = "", out.width.px = 624, out.height.px = 384, params.src = "ring2, exercise = TRUE, exercise.setup = \"create\"", + code = "", out.width.px = 624, out.height.px = 384, params.src = "ring2, exercise = TRUE, exercise.setup = \"create\", purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -944,10 +971,10 @@

    Playing the DeGroot learning model

    @@ -976,10 +1003,11 @@

    Playing the DeGroot learning model

    "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = "", chunks = list(list(label = "create", code = "", opts = list(label = "\"create\"", echo = "TRUE", - exercise = "TRUE"), engine = "r"), list(label = "ring3", - code = "", opts = list(label = "\"ring3\"", exercise = "TRUE", - exercise.setup = "\"create\""), engine = "r")), code_check = NULL, - error_check = NULL, check = NULL, solution = structure("plot(play_diffusion(rg, seeds = 1:4))", chunk_opts = list( + exercise = "TRUE", purl = "FALSE"), engine = "r"), list( + label = "ring3", code = "", opts = list(label = "\"ring3\"", + exercise = "TRUE", exercise.setup = "\"create\"", purl = "FALSE"), + engine = "r")), code_check = NULL, error_check = NULL, check = NULL, + solution = structure("plot(play_diffusion(rg, seeds = 1:4))", chunk_opts = list( label = "ring3-solution")), tests = NULL, options = list( eval = FALSE, echo = TRUE, results = "markup", tidy = FALSE, tidy.opts = NULL, collapse = FALSE, prompt = FALSE, comment = NA, @@ -995,9 +1023,9 @@

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "ring3", exercise = TRUE, exercise.setup = "create", - code = "", out.width.px = 624, out.height.px = 384, params.src = "ring3, exercise = TRUE, exercise.setup = \"create\"", + code = "", out.width.px = 624, out.height.px = 384, params.src = "ring3, exercise = TRUE, exercise.setup = \"create\", purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -1017,10 +1045,11 @@

    "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = "", chunks = list(list(label = "create", code = "", opts = list(label = "\"create\"", echo = "TRUE", - exercise = "TRUE"), engine = "r"), list(label = "ring4", - code = "", opts = list(label = "\"ring4\"", exercise = "TRUE", - exercise.setup = "\"create\""), engine = "r")), code_check = NULL, - error_check = NULL, check = NULL, solution = structure("plot(play_diffusion(rg, seeds = node_is_random(rg, 4)))", chunk_opts = list( + exercise = "TRUE", purl = "FALSE"), engine = "r"), list( + label = "ring4", code = "", opts = list(label = "\"ring4\"", + exercise = "TRUE", exercise.setup = "\"create\"", purl = "FALSE"), + engine = "r")), code_check = NULL, error_check = NULL, check = NULL, + solution = structure("plot(play_diffusion(rg, seeds = node_is_random(rg, 4)))", chunk_opts = list( label = "ring4-solution")), tests = NULL, options = list( eval = FALSE, echo = TRUE, results = "markup", tidy = FALSE, tidy.opts = NULL, collapse = FALSE, prompt = FALSE, comment = NA, @@ -1036,9 +1065,9 @@

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "ring4", exercise = TRUE, exercise.setup = "create", - code = "", out.width.px = 624, out.height.px = 384, params.src = "ring4, exercise = TRUE, exercise.setup = \"create\"", + code = "", out.width.px = 624, out.height.px = 384, params.src = "ring4, exercise = TRUE, exercise.setup = \"create\", purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -1048,10 +1077,10 @@

@@ -1080,10 +1109,11 @@
    "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = "", chunks = list(list(label = "create", code = "", opts = list(label = "\"create\"", echo = "TRUE", - exercise = "TRUE"), engine = "r"), list(label = "lattice", - code = "", opts = list(label = "\"lattice\"", exercise = "TRUE", - exercise.setup = "\"create\""), engine = "r")), code_check = NULL, - error_check = NULL, check = NULL, solution = structure(c("plot(play_diffusion(la, seeds = 1))/", + exercise = "TRUE", purl = "FALSE"), engine = "r"), list( + label = "lattice", code = "", opts = list(label = "\"lattice\"", + exercise = "TRUE", exercise.setup = "\"create\"", purl = "FALSE"), + engine = "r")), code_check = NULL, error_check = NULL, check = NULL, + solution = structure(c("plot(play_diffusion(la, seeds = 1))/", "plot(play_diffusion(la, seeds = 16))"), chunk_opts = list( label = "lattice-solution")), tests = NULL, options = list( eval = FALSE, echo = TRUE, results = "markup", tidy = FALSE, @@ -1100,10 +1130,10 @@

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "lattice", exercise = TRUE, exercise.setup = "create", code = "", out.width.px = 624, - out.height.px = 384, params.src = "lattice, exercise = TRUE, exercise.setup = \"create\"", + out.height.px = 384, params.src = "lattice, exercise = TRUE, exercise.setup = \"create\", purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -1112,10 +1142,10 @@

@@ -1144,10 +1174,11 @@
    "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = "", chunks = list(list(label = "create", code = "", opts = list(label = "\"create\"", echo = "TRUE", - exercise = "TRUE"), engine = "r"), list(label = "scale", - code = "", opts = list(label = "\"scale\"", exercise = "TRUE", - exercise.setup = "\"create\""), engine = "r")), code_check = NULL, - error_check = NULL, check = NULL, solution = structure(c("plot(play_diffusion(sf, seeds = 10, steps = 10)) / ", + exercise = "TRUE", purl = "FALSE"), engine = "r"), list( + label = "scale", code = "", opts = list(label = "\"scale\"", + exercise = "TRUE", exercise.setup = "\"create\"", purl = "FALSE"), + engine = "r")), code_check = NULL, error_check = NULL, check = NULL, + solution = structure(c("plot(play_diffusion(sf, seeds = 10, steps = 10)) / ", "plot(play_diffusion(sf, seeds = node_is_random(sf), steps = 10)) /", "plot(play_diffusion(sf, seeds = node_is_max(node_degree(sf)), steps = 10)) /", "plot(play_diffusion(sf, seeds = node_is_min(node_degree(sf)), steps = 10))" @@ -1167,9 +1198,9 @@

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "scale", exercise = TRUE, exercise.setup = "create", - code = "", out.width.px = 624, out.height.px = 384, params.src = "scale, exercise = TRUE, exercise.setup = \"create\"", + code = "", out.width.px = 624, out.height.px = 384, params.src = "scale, exercise = TRUE, exercise.setup = \"create\", purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -1179,22 +1210,22 @@
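The 'scale' solution above compares seeding strategies on a scale-free network: a fixed node, a random node, and the highest- and lowest-degree nodes. As a runnable sketch, with the `generate_scalefree()` arguments as placeholders since the 'create' chunk is not shown in this diff:

```r
library(manynet)    # generate_scalefree(), node_degree()
library(migraph)    # play_diffusion(), node_is_*() strategies
library(patchwork)  # the `/` operator stacks plots vertically

set.seed(123)                        # random seeding is stochastic
sf <- generate_scalefree(32, 0.025)  # placeholder arguments

plot(play_diffusion(sf, seeds = 10, steps = 10)) /                             # fixed node
  plot(play_diffusion(sf, seeds = node_is_random(sf), steps = 10)) /           # random node
  plot(play_diffusion(sf, seeds = node_is_max(node_degree(sf)), steps = 10)) / # hub
  plot(play_diffusion(sf, seeds = node_is_min(node_degree(sf)), steps = 10))   # periphery
```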

@@ -1225,10 +1256,11 @@
    "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = "", chunks = list(list(label = "create", code = "", opts = list(label = "\"create\"", echo = "TRUE", - exercise = "TRUE"), engine = "r"), list(label = "complex", - code = "", opts = list(label = "\"complex\"", exercise = "TRUE", - exercise.setup = "\"create\""), engine = "r")), code_check = NULL, - error_check = NULL, check = NULL, solution = structure(c("plot(play_diffusion(rg, seeds = 1, thresholds = 1))/", + exercise = "TRUE", purl = "FALSE"), engine = "r"), list( + label = "complex", code = "", opts = list(label = "\"complex\"", + exercise = "TRUE", exercise.setup = "\"create\"", purl = "FALSE"), + engine = "r")), code_check = NULL, error_check = NULL, check = NULL, + solution = structure(c("plot(play_diffusion(rg, seeds = 1, thresholds = 1))/", "plot(play_diffusion(rg, seeds = 1, thresholds = 2))/", "plot(play_diffusion(rg, seeds = 1:2, thresholds = 2))/", "plot(play_diffusion(rg, seeds = c(1,16), thresholds = 2))" ), chunk_opts = list(label = "complex-solution")), tests = NULL, @@ -1247,10 +1279,10 @@

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "complex", exercise = TRUE, exercise.setup = "create", code = "", out.width.px = 624, - out.height.px = 384, params.src = "complex, exercise = TRUE, exercise.setup = \"create\"", + out.height.px = 384, params.src = "complex, exercise = TRUE, exercise.setup = \"create\", purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -1259,31 +1291,31 @@
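The 'complex' solution above contrasts simple with complex contagion: with `thresholds = 2` a node adopts only after two of its neighbours have, so a single seed stalls and the placement of paired seeds matters. A sketch under the same assumed `rg` ring as above:

```r
library(manynet)
library(migraph)
library(patchwork)

rg <- create_ring(32, width = 2)  # assumed constructor, as above

plot(play_diffusion(rg, seeds = 1, thresholds = 1)) /        # simple contagion
  plot(play_diffusion(rg, seeds = 1, thresholds = 2)) /      # complex: lone seed stalls
  plot(play_diffusion(rg, seeds = 1:2, thresholds = 2)) /    # adjacent seeds spread
  plot(play_diffusion(rg, seeds = c(1, 16), thresholds = 2)) # distant seeds stall
```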

@@ -1301,10 +1333,11 @@
    "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = "", chunks = list(list(label = "create", code = "", opts = list(label = "\"create\"", echo = "TRUE", - exercise = "TRUE"), engine = "r"), list(label = "sfcomplex", - code = "", opts = list(label = "\"sfcomplex\"", exercise = "TRUE", - exercise.setup = "\"create\""), engine = "r")), code_check = NULL, - error_check = NULL, check = NULL, solution = structure("plot(play_diffusion(sf, seeds = 1, thresholds = 2))", chunk_opts = list( + exercise = "TRUE", purl = "FALSE"), engine = "r"), list( + label = "sfcomplex", code = "", opts = list(label = "\"sfcomplex\"", + exercise = "TRUE", exercise.setup = "\"create\"", purl = "FALSE"), + engine = "r")), code_check = NULL, error_check = NULL, check = NULL, + solution = structure("plot(play_diffusion(sf, seeds = 1, thresholds = 2))", chunk_opts = list( label = "sfcomplex-solution")), tests = NULL, options = list( eval = FALSE, echo = TRUE, results = "markup", tidy = FALSE, tidy.opts = NULL, collapse = FALSE, prompt = FALSE, comment = NA, @@ -1320,10 +1353,10 @@

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "sfcomplex", exercise = TRUE, exercise.setup = "create", code = "", out.width.px = 624, - out.height.px = 384, params.src = "sfcomplex, exercise = TRUE, exercise.setup = \"create\"", + out.height.px = 384, params.src = "sfcomplex, exercise = TRUE, exercise.setup = \"create\", purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -1333,10 +1366,10 @@

@@ -1365,10 +1398,11 @@
    "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = "", chunks = list(list(label = "create", code = "", opts = list(label = "\"create\"", echo = "TRUE", - exercise = "TRUE"), engine = "r"), list(label = "sfprop", - code = "", opts = list(label = "\"sfprop\"", exercise = "TRUE", - exercise.setup = "\"create\""), engine = "r")), code_check = NULL, - error_check = NULL, check = NULL, solution = structure(c("plot(play_diffusion(sf, seeds = 1:2, thresholds = 0.1, steps = 10))/", + exercise = "TRUE", purl = "FALSE"), engine = "r"), list( + label = "sfprop", code = "", opts = list(label = "\"sfprop\"", + exercise = "TRUE", exercise.setup = "\"create\"", purl = "FALSE"), + engine = "r")), code_check = NULL, error_check = NULL, check = NULL, + solution = structure(c("plot(play_diffusion(sf, seeds = 1:2, thresholds = 0.1, steps = 10))/", "plot(play_diffusion(sf, seeds = 1:2, thresholds = 0.25, steps = 10))/", "plot(play_diffusion(sf, seeds = 1:2, thresholds = 0.5, steps = 10))" ), chunk_opts = list(label = "sfprop-solution")), tests = NULL, @@ -1387,10 +1421,10 @@

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "sfprop", exercise = TRUE, exercise.setup = "create", code = "", out.width.px = 624, - out.height.px = 384, params.src = "sfprop, exercise = TRUE, exercise.setup = \"create\"", + out.height.px = 384, params.src = "sfprop, exercise = TRUE, exercise.setup = \"create\", purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -1399,10 +1433,10 @@
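The 'sfprop' solution switches to fractional thresholds, read as the proportion of a node's neighbours that must have adopted, which matters where degrees vary as widely as in a scale-free network. A sketch, under the same assumed `sf`:

```r
library(manynet)
library(migraph)
library(patchwork)

set.seed(123)
sf <- generate_scalefree(32, 0.025)  # placeholder arguments, as above

plot(play_diffusion(sf, seeds = 1:2, thresholds = 0.1, steps = 10)) /
  plot(play_diffusion(sf, seeds = 1:2, thresholds = 0.25, steps = 10)) /
  plot(play_diffusion(sf, seeds = 1:2, thresholds = 0.5, steps = 10))
```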

@@ -1431,10 +1465,11 @@
    "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = "", chunks = list(list(label = "create", code = "", opts = list(label = "\"create\"", echo = "TRUE", - exercise = "TRUE"), engine = "r"), list(label = "rand", - code = "", opts = list(label = "\"rand\"", exercise = "TRUE", - exercise.setup = "\"create\""), engine = "r")), code_check = NULL, - error_check = NULL, check = NULL, solution = structure(c("plot(play_diffusion(sf, thresholds = c(rep(0.1,10), rep(0.25,22))))/", + exercise = "TRUE", purl = "FALSE"), engine = "r"), list( + label = "rand", code = "", opts = list(label = "\"rand\"", + exercise = "TRUE", exercise.setup = "\"create\"", purl = "FALSE"), + engine = "r")), code_check = NULL, error_check = NULL, check = NULL, + solution = structure(c("plot(play_diffusion(sf, thresholds = c(rep(0.1,10), rep(0.25,22))))/", "plot(play_diffusion(sf, thresholds = c(rep(0.25,10), rep(0.1,22))))" ), chunk_opts = list(label = "rand-solution")), tests = NULL, options = list(eval = FALSE, echo = TRUE, results = "markup", @@ -1452,9 +1487,9 @@

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "rand", exercise = TRUE, exercise.setup = "create", - code = "", out.width.px = 624, out.height.px = 384, params.src = "rand, exercise = TRUE, exercise.setup = \"create\"", + code = "", out.width.px = 624, out.height.px = 384, params.src = "rand, exercise = TRUE, exercise.setup = \"create\", purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -1474,10 +1509,11 @@

    "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = "", chunks = list(list(label = "create", code = "", opts = list(label = "\"create\"", echo = "TRUE", - exercise = "TRUE"), engine = "r"), list(label = "diffusions", - code = "", opts = list(label = "\"diffusions\"", exercise = "TRUE", - exercise.setup = "\"create\""), engine = "r")), code_check = NULL, - error_check = NULL, check = NULL, solution = structure("plot(play_diffusions(rd, transmissibility = 0.5, times = 5, steps = 10))", chunk_opts = list( + exercise = "TRUE", purl = "FALSE"), engine = "r"), list( + label = "diffusions", code = "", opts = list(label = "\"diffusions\"", + exercise = "TRUE", exercise.setup = "\"create\"", purl = "FALSE"), + engine = "r")), code_check = NULL, error_check = NULL, check = NULL, + solution = structure("plot(play_diffusions(rd, transmissibility = 0.5, times = 5, steps = 10))", chunk_opts = list( label = "diffusions-solution")), tests = NULL, options = list( eval = FALSE, echo = TRUE, results = "markup", tidy = FALSE, tidy.opts = NULL, collapse = FALSE, prompt = FALSE, comment = NA, @@ -1493,10 +1529,10 @@

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "diffusions", exercise = TRUE, exercise.setup = "create", code = "", out.width.px = 624, - out.height.px = 384, params.src = "diffusions, exercise = TRUE, exercise.setup = \"create\"", + out.height.px = 384, params.src = "diffusions, exercise = TRUE, exercise.setup = \"create\", purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -1515,9 +1551,9 @@
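With probabilistic transmission a single run is only one draw, so the 'diffusions' solution uses the plural `play_diffusions()` to rerun the simulation and summarise across runs. A sketch, with the `generate_random()` arguments assumed since the 'create' chunk is not shown:

```r
library(manynet)  # generate_random()
library(migraph)  # play_diffusions()

set.seed(123)
rd <- generate_random(32, 0.1)  # placeholder arguments

# Five ten-step runs in which each contact transmits with probability 0.5
plot(play_diffusions(rd, transmissibility = 0.5, times = 5, steps = 10))
```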

    "library(manynet)", "library(migraph)", "library(patchwork)", "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = NULL, chunks = list(list(label = "sir", - code = "", opts = list(label = "\"sir\"", exercise = "TRUE"), - engine = "r")), code_check = NULL, error_check = NULL, check = NULL, - solution = structure("plot(play_diffusions(rd, recovery = 0.2))", chunk_opts = list( + code = "", opts = list(label = "\"sir\"", exercise = "TRUE", + purl = "FALSE"), engine = "r")), code_check = NULL, error_check = NULL, + check = NULL, solution = structure("plot(play_diffusions(rd, recovery = 0.2))", chunk_opts = list( label = "sir-solution")), tests = NULL, options = list( eval = FALSE, echo = TRUE, results = "markup", tidy = FALSE, tidy.opts = NULL, collapse = FALSE, prompt = FALSE, comment = NA, @@ -1533,9 +1569,9 @@

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "sir", exercise = TRUE, code = "", - out.width.px = 624, out.height.px = 384, params.src = "sir, exercise = TRUE", + out.width.px = 624, out.height.px = 384, params.src = "sir, exercise = TRUE, purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -1554,9 +1590,9 @@

    "library(manynet)", "library(migraph)", "library(patchwork)", "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = NULL, chunks = list(list(label = "sirs", - code = "", opts = list(label = "\"sirs\"", exercise = "TRUE"), - engine = "r")), code_check = NULL, error_check = NULL, check = NULL, - solution = structure("plot(play_diffusion(rd, recovery = 0.25, waning = 0.05))", chunk_opts = list( + code = "", opts = list(label = "\"sirs\"", exercise = "TRUE", + purl = "FALSE"), engine = "r")), code_check = NULL, error_check = NULL, + check = NULL, solution = structure("plot(play_diffusion(rd, recovery = 0.25, waning = 0.05))", chunk_opts = list( label = "sirs-solution")), tests = NULL, options = list( eval = FALSE, echo = TRUE, results = "markup", tidy = FALSE, tidy.opts = NULL, collapse = FALSE, prompt = FALSE, comment = NA, @@ -1572,9 +1608,9 @@

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "sirs", exercise = TRUE, code = "", - out.width.px = 624, out.height.px = 384, params.src = "sirs, exercise = TRUE", + out.width.px = 624, out.height.px = 384, params.src = "sirs, exercise = TRUE, purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -1584,10 +1620,10 @@

@@ -1616,10 +1652,11 @@
    "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = "", chunks = list(list(label = "create", code = "", opts = list(label = "\"create\"", echo = "TRUE", - exercise = "TRUE"), engine = "r"), list(label = "seir", - code = "", opts = list(label = "\"seir\"", exercise = "TRUE", - exercise.setup = "\"create\""), engine = "r")), code_check = NULL, - error_check = NULL, check = NULL, solution = structure("plot(play_diffusion(rd, latency = 0.25, recovery = 0.25))", chunk_opts = list( + exercise = "TRUE", purl = "FALSE"), engine = "r"), list( + label = "seir", code = "", opts = list(label = "\"seir\"", + exercise = "TRUE", exercise.setup = "\"create\"", purl = "FALSE"), + engine = "r")), code_check = NULL, error_check = NULL, check = NULL, + solution = structure("plot(play_diffusion(rd, latency = 0.25, recovery = 0.25))", chunk_opts = list( label = "seir-solution")), tests = NULL, options = list( eval = FALSE, echo = TRUE, results = "markup", tidy = FALSE, tidy.opts = NULL, collapse = FALSE, prompt = FALSE, comment = NA, @@ -1635,9 +1672,9 @@

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "seir", exercise = TRUE, exercise.setup = "create", - code = "", out.width.px = 624, out.height.px = 384, params.src = "seir, exercise = TRUE, exercise.setup = \"create\"", + code = "", out.width.px = 624, out.height.px = 384, params.src = "seir, exercise = TRUE, exercise.setup = \"create\", purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -1656,11 +1693,11 @@
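The 'sir', 'sirs', and 'seir' solutions above extend the same simulator into compartmental models: `recovery` moves infected nodes to recovered (SIR), `waning` lets that immunity lapse back to susceptible (SIRS), and `latency` inserts an exposed, not-yet-infectious stage (SEIR). The calls are verbatim from the cached solutions; only the `rd` constructor is assumed:

```r
library(manynet)
library(migraph)

set.seed(123)
rd <- generate_random(32, 0.1)  # placeholder arguments, as above

plot(play_diffusions(rd, recovery = 0.2))                  # SIR, summarised over runs
plot(play_diffusion(rd, recovery = 0.25, waning = 0.05))   # SIRS: immunity wanes
plot(play_diffusion(rd, latency = 0.25, recovery = 0.25))  # SEIR: latent stage
```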

    "library(manynet)", "library(migraph)", "library(patchwork)", "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = NULL, chunks = list(list(label = "aperiod", - code = "", opts = list(label = "\"aperiod\"", exercise = "TRUE"), - engine = "r")), code_check = NULL, error_check = NULL, check = NULL, - solution = structure(c("is_connected(ison_networkers)", "is_aperiodic(ison_networkers)" - ), chunk_opts = list(label = "aperiod-solution")), tests = NULL, - options = list(eval = FALSE, echo = TRUE, results = "markup", + code = "", opts = list(label = "\"aperiod\"", exercise = "TRUE", + purl = "FALSE"), engine = "r")), code_check = NULL, error_check = NULL, + check = NULL, solution = structure(c("is_connected(ison_networkers)", + "is_aperiodic(ison_networkers)"), chunk_opts = list(label = "aperiod-solution")), + tests = NULL, options = list(eval = FALSE, echo = TRUE, results = "markup", tidy = FALSE, tidy.opts = NULL, collapse = FALSE, prompt = FALSE, comment = NA, highlight = FALSE, size = "normalsize", background = "#F7F7F7", strip.white = TRUE, cache = 0, @@ -1675,9 +1712,9 @@

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "aperiod", exercise = TRUE, - code = "", out.width.px = 624, out.height.px = 384, params.src = "aperiod, exercise = TRUE", + code = "", out.width.px = 624, out.height.px = 384, params.src = "aperiod, exercise = TRUE, purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -1686,10 +1723,10 @@

@@ -1717,9 +1754,9 @@
    "library(manynet)", "library(migraph)", "library(patchwork)", "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = NULL, chunks = list(list(label = "degroot", - code = "", opts = list(label = "\"degroot\"", exercise = "TRUE"), - engine = "r")), code_check = NULL, error_check = NULL, check = NULL, - solution = structure(c("beliefs <- rbinom(network_nodes(ison_networkers), 1, prob = 0.25)", + code = "", opts = list(label = "\"degroot\"", exercise = "TRUE", + purl = "FALSE"), engine = "r")), code_check = NULL, error_check = NULL, + check = NULL, solution = structure(c("beliefs <- rbinom(network_nodes(ison_networkers), 1, prob = 0.25)", "ison_networkers %>% mutate(beliefs = beliefs) %>% autographr(node_color = \"beliefs\")", "netlearn <- play_learning(ison_networkers, beliefs)", "plot(netlearn)" ), chunk_opts = list(label = "degroot-solution")), tests = NULL, @@ -1738,9 +1775,9 @@

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "degroot", exercise = TRUE, - code = "", out.width.px = 624, out.height.px = 384, params.src = "degroot, exercise = TRUE", + code = "", out.width.px = 624, out.height.px = 384, params.src = "degroot, exercise = TRUE, purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -1750,27 +1787,27 @@
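The 'aperiod' and 'degroot' solutions carry the section's central claim: on a connected, aperiodic network, DeGroot averaging converges to a shared consensus of the initial beliefs. Restated from the cached solutions, with only `set.seed()` added for reproducibility:

```r
library(manynet)  # ison_networkers, autographr()
library(migraph)  # is_aperiodic(), network_nodes(), play_learning()

# Conditions for DeGroot convergence
is_connected(ison_networkers)
is_aperiodic(ison_networkers)

# Random binary starting beliefs, one per node
set.seed(123)
beliefs <- rbinom(network_nodes(ison_networkers), 1, prob = 0.25)
ison_networkers %>% mutate(beliefs = beliefs) %>% autographr(node_color = "beliefs")

# Nodes repeatedly average their neighbours' beliefs until they stabilise
netlearn <- play_learning(ison_networkers, beliefs)
plot(netlearn)
```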

    diff --git a/inst/tutorials/tutorial8/regression.Rmd b/inst/tutorials/tutorial8/regression.Rmd index 4d6a25ed..766af398 100644 --- a/inst/tutorials/tutorial8/regression.Rmd +++ b/inst/tutorials/tutorial8/regression.Rmd @@ -31,32 +31,32 @@ so to make this simpler, let's concentrate on: Fortunately, all these data cleaning moves are easy to do in `{manynet}`, and can be seen in the following chunk in order: -```{r friends, exercise=TRUE} +```{r friends, exercise=TRUE, purl = FALSE} ``` -```{r friends-hint-1} +```{r friends-hint-1, purl = FALSE} # since the dataset is a 'signed' graph, we want to get just the # positively signed ties to get the friendship graph # (and lose the enmity relations) to_unsigned(____, keep = "positive") ``` -```{r friends-hint-2} +```{r friends-hint-2, purl = FALSE} # to_giant() is a quick easy way to get the giant/main component to_giant(____) ``` -```{r friends-hint-3} +```{r friends-hint-3, purl = FALSE} to_subgraph(____, Appearances >= mean(Appearances)) ``` -```{r friends-hint-4} +```{r friends-hint-4, purl = FALSE} # don't forget to assign the results! marvel_friends <- ____ ``` -```{r friends-hint-5} +```{r friends-hint-5, purl = FALSE} marvel_friends <- to_unsigned(ison_marvel_relationships, keep = "positive") marvel_friends <- to_giant(marvel_friends) marvel_friends <- marvel_friends %>% to_subgraph(Appearances >= mean(Appearances)) @@ -75,11 +75,11 @@ Recall that this data has several nodal attributes. Explore a couple of these attributes, "Gender" and "PowerOrigin" visually using `autographr()`. -```{r plotfriends, exercise=TRUE, exercise.setup = "friends-solution"} +```{r plotfriends, exercise=TRUE, exercise.setup = "friends-solution", purl = FALSE} ``` -```{r plotfriends-hint} +```{r plotfriends-hint, purl = FALSE} # Since both Gender and PowerOrigin are categorical variables # you will need to use two different aesthetic dimensions to # represent them together. @@ -117,11 +117,11 @@ A perfectly homogeneous group would receive a score of 0, while a perfectly heterogeneous group (with members spread evenly over the maximum categories) would receive a score of 1. -```{r blau, exercise=TRUE, exercise.setup = "friends-solution"} +```{r blau, exercise=TRUE, exercise.setup = "friends-solution", purl = FALSE} ``` -```{r blau-hint} +```{r blau-hint, purl = FALSE} network_diversity(____, ____) ``` @@ -142,11 +142,11 @@ For example, we might be interested in whether our comic book heroes are equally gender diverse across their (power) origin stories, or equally intellectually diverse across gender.^[Note that this works for calculated categorical variables too, such as cluster/group assignment from community detection or equivalence classes.] -```{r crossref, exercise=TRUE, exercise.setup = "friends-solution"} +```{r crossref, exercise=TRUE, exercise.setup = "friends-solution", purl = FALSE} ``` -```{r crossref-hint} +```{r crossref-hint, purl = FALSE} network_diversity(____, ____, ____) ``` @@ -182,11 +182,11 @@ As such, an EI index of -1 suggests perfect homophily, whereas an EI index of +1 Check how homophilic three variables in the network are, "Gender", "PowerOrigin", and "Attractive". 
-```{r ei, exercise=TRUE, exercise.setup = "friends-solution"} +```{r ei, exercise=TRUE, exercise.setup = "friends-solution", purl = FALSE} ``` -```{r ei-hint} +```{r ei-hint, purl = FALSE} network_heterophily(____, ____) ``` @@ -196,7 +196,7 @@ network_heterophily(____, ____) (obs.attract <- network_heterophily(marvel_friends, "Attractive")) ``` -```{r homophily-present, echo=FALSE} +```{r homophily-present, echo=FALSE, purl = FALSE} question("For which variables is there a signal of homophily according to the EI index? (Choose all that apply)", answer("Gender", correct = TRUE, @@ -231,15 +231,15 @@ such as `test_random()`. Plot the results of running this function with respect to the EI index on each of the three variables used above one thousand times. -```{r rando, exercise=TRUE, exercise.setup = "friends-solution"} +```{r rando, exercise=TRUE, exercise.setup = "friends-solution", purl = FALSE} ``` -```{r rando-hint-1} +```{r rando-hint-1, purl = FALSE} rand.____ <- test_random(____, FUN = ____, attribute = ____, times = ___) ``` -```{r rando-hint-2} +```{r rando-hint-2, purl = FALSE} plot(rand.____) ``` @@ -287,11 +287,11 @@ Permuting the network retains the structure of the network, but reassigns any labels (variables) randomly. Let's first plot the observed data and some permuted data next to each other. -```{r perm, exercise=TRUE, exercise.setup = "friends-solution"} +```{r perm, exercise=TRUE, exercise.setup = "friends-solution", purl = FALSE} ``` -```{r perm-hint} +```{r perm-hint, purl = FALSE} autographr(generate_permutation(____, with_attr = TRUE), ____) ``` @@ -312,11 +312,11 @@ attributes, but it is just a single permutation. Let's try a test that runs this over a succession of permutations, just as we did with random graphs. -```{r testpermute, exercise=TRUE, exercise.setup = "rando-solution"} +```{r testpermute, exercise=TRUE, exercise.setup = "rando-solution", purl = FALSE} ``` -```{r testpermute-hint} +```{r testpermute-hint, purl = FALSE} test_permutation(____, FUN = ____, attribute = ____, times = ____) ``` @@ -334,7 +334,7 @@ Again, we see that there is perhaps nothing so surprising that we got the homoph for gender that we did, but the lack of power origin heterophily is surprising. Plot the results for gender and power according to the random and permutation baselines. -```{r cugqap, exercise=TRUE, exercise.setup = "testpermute-solution"} +```{r cugqap, exercise=TRUE, exercise.setup = "testpermute-solution", purl = FALSE} ``` @@ -356,7 +356,7 @@ You may recognise some of the names. The main network consists of 32 scholars with directed ties weighted by the total number of messages sent from $i$ to $j$ over the period of the study. Nodal attributes collected include the primary discipline and number of citations in the social science citation index at the start of the study. -```{r introeies, exercise=TRUE} +```{r introeies, exercise=TRUE, purl = FALSE} ison_networkers autographr(ison_networkers, node_color = "Discipline") @@ -371,20 +371,20 @@ and come up with a couple of key hypotheses: Let's start with a pretty maximally specified model (note that it doesn't make sense to include both ego and alter effects because these are undirected). 
-```{r qapmax, exercise=TRUE, exercise.timelimit = 240} +```{r qapmax, exercise=TRUE, exercise.timelimit = 240, purl = FALSE} ``` -```{r qapmax-hint-1} +```{r qapmax-hint-1, purl = FALSE} network_reg(____, ison_networkers, times = 200) ``` -```{r qapmax-hint-2} +```{r qapmax-hint-2, purl = FALSE} weight ~ alter(Citations) + sim(Citations) + alter(Discipline) + same(Discipline) ``` -```{r qapmax-hint-3} +```{r qapmax-hint-3, purl = FALSE} model1 <- network_reg(weight ~ alter(Citations) + sim(Citations) + alter(Discipline) + same(Discipline), ison_networkers, times = 200) @@ -400,7 +400,7 @@ We can use tidy methods to get the salient information from this model, and `{migraph}` includes also a plot method for these results to facilitate the quick interpretation of these results. -```{r qapinterp, exercise=TRUE, exercise.setup = "qapmax-solution"} +```{r qapinterp, exercise=TRUE, exercise.setup = "qapmax-solution", purl = FALSE} ``` @@ -415,7 +415,7 @@ with the fitted coefficient from the data as a red dot. Subtle lines are used to indicate 95%, but here the distributions are rendered so wide that they are often not seen. -```{r qap-interp, echo=FALSE} +```{r qap-interp, echo=FALSE, purl = FALSE} question("What can we say from these results?", answer("Researchers send more messages to those who are cited more", message = "Looks like alter Citations is not significant."), diff --git a/inst/tutorials/tutorial8/regression.html b/inst/tutorials/tutorial8/regression.html index 40be39ec..b8e3f2cd 100644 --- a/inst/tutorials/tutorial8/regression.html +++ b/inst/tutorials/tutorial8/regression.html @@ -13,7 +13,7 @@ - + Regression @@ -128,12 +128,13 @@

    Setting up

    {manynet}, and can be seen in the following chunk in order:

    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    +data-lines="0" data-pipe="|>">
    # since the dataset is a 'signed' graph, we want to get just the
     # positively signed ties to get the friendship graph 
     # (and lose the enmity relations)
    @@ -141,24 +142,24 @@ 

    +data-lines="0" data-pipe="|>">
    # to_giant() is a quick easy way to get the giant/main component
     to_giant(____)
    +data-lines="0" data-pipe="|>">
    to_subgraph(____, Appearances >= mean(Appearances))
    +data-lines="0" data-pipe="|>">
    # don't forget to assign the results!
     marvel_friends <- ____
    +data-lines="0" data-pipe="|>">
    marvel_friends <- to_unsigned(ison_marvel_relationships, keep = "positive")
     marvel_friends <- to_giant(marvel_friends)
     marvel_friends <- marvel_friends %>% to_subgraph(Appearances >= mean(Appearances))
    @@ -166,7 +167,7 @@ 

    +data-lines="0" data-pipe="|>">

    This gives us a dataset of nearly twenty characters and a little more @@ -175,12 +176,12 @@

    visually using autographr().

    +data-lines="0" data-pipe="|>">
    +data-lines="0" data-pipe="|>">
    # Since both Gender and PowerOrigin are categorical variables
     # you will need to use two different aesthetic dimensions to
     # represent them together.
    @@ -191,7 +192,7 @@ 

    +data-lines="0" data-pipe="|>">
    autographr(marvel_friends, 
                node_shape = "Gender",
                node_color = "PowerOrigin")
    @@ -218,17 +219,18 @@

    Calculating Blau index

    perfectly heterogeneous group (with members spread evenly over the maximum categories) would receive a score of 1.

    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    +data-lines="0" data-pipe="|>">
    network_diversity(____, ____)
    +data-lines="0" data-pipe="|>">
    network_diversity(marvel_friends, "Gender")
     network_diversity(marvel_friends, "PowerOrigin")
     network_diversity(marvel_friends, "Attractive")
    @@ -244,17 +246,18 @@ 

across gender.¹

    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    +data-lines="0" data-pipe="|>">
    network_diversity(____, ____, ____)
    +data-lines="0" data-pipe="|>">
    network_diversity(marvel_friends, "Gender", "PowerOrigin")
     network_diversity(marvel_friends, "Intellect", "Gender")
    @@ -289,21 +292,23 @@

    Calculating EI index

    class="math inline">\(I\) is the number of ties present within a variable’s categories (i.e. internal). As such, an EI index of -1 suggests perfect homophily, whereas an EI index of +1 suggests perfect -heterophily.

    +heterophily. (This is why the function is called +network_heterophily()).

    Check how homophilic three variables in the network are, “Gender”, “PowerOrigin”, and “Attractive”.

    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    -
    network_homophily(____, ____)
    +data-lines="0" data-pipe="|>"> +
    network_heterophily(____, ____)
    +data-lines="0" data-pipe="|>">
    (obs.gender <- network_heterophily(marvel_friends, "Gender"))
     (obs.powers <- network_heterophily(marvel_friends, "PowerOrigin")) 
     (obs.attract <- network_heterophily(marvel_friends, "Attractive")) 
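The 'ei' solution computes the EI index, (E - I)/(E + I) for E external and I internal ties, so -1 marks perfect homophily and +1 perfect heterophily; wrapping each call in parentheses assigns and prints in one step. Continuing from `marvel_friends` as built in the sketch above:

```r
library(migraph)

# marvel_friends as constructed in the previous sketch
(obs.gender  <- network_heterophily(marvel_friends, "Gender"))
(obs.powers  <- network_heterophily(marvel_friends, "PowerOrigin"))
(obs.attract <- network_heterophily(marvel_friends, "Attractive"))
```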
    @@ -337,34 +342,24 @@

    Conditional uniform graph tests

    with respect to the EI index on each of the three variables used above one thousand times.

    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    +data-lines="0" data-pipe="|>">
    rand.____ <- test_random(____, FUN = ____, attribute = ____, times = ___)
    +data-lines="0" data-pipe="|>">
    plot(rand.____)
-rand.gender <- test_random(marvel_friends, 
    -                            network_homophily, attribute = "Gender", 
    -                           times = 1000)
    -rand.power <- test_random(marvel_friends, 
    -                           network_homophily, attribute = "PowerOrigin", 
    -                           times = 1000)
    -rand.attract <- test_random(marvel_friends, 
    -                             network_homophily, attribute = "Attractive", 
    -                           times = 1000)
    -plot(rand.gender)
    -plot(rand.power)
    -plot(rand.attract)
    +data-lines="0" data-pipe="|>"> +

    The plots of these results use a dotted vertical line for 0 where this is in bounds, a red vertical line for the observed score, and a @@ -394,17 +389,18 @@
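The corrected 'rando' solution (the diff above swaps `network_homophily` for `network_heterophily`) benchmarks each observed EI score against the same statistic computed on 1,000 random graphs of matching size and density. Continuing from the objects above:

```r
library(migraph)

# Conditional uniform graph test of the EI index, marvel_friends as above
rand.gender <- test_random(marvel_friends,
                           network_heterophily, attribute = "Gender",
                           times = 1000)
rand.power <- test_random(marvel_friends,
                          network_heterophily, attribute = "PowerOrigin",
                          times = 1000)
plot(rand.gender)
plot(rand.power)
```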

    Quadratic assignment procedure tests

    (variables) randomly. Let’s first plot the observed data and some permuted data next to each other.

    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    +data-lines="0" data-pipe="|>">
    autographr(generate_permutation(____, with_attr = TRUE), ____)
    +data-lines="0" data-pipe="|>">
    old <- autographr(marvel_friends, 
                       labels = FALSE, node_size = 6, 
                       node_color = "PowerOrigin", 
    @@ -421,36 +417,32 @@ 

    as we did with random graphs.

    +data-lines="0" data-pipe="|>">
    +data-lines="0" data-pipe="|>">
    test_permutation(____, FUN = ____, attribute = ____,
                                     times = ____)
-perm.gender <- test_permutation(marvel_friends, 
    -                                network_heterophily, attribute = "Gender",
    -                                times = 1000)
    -perm.power <- test_permutation(marvel_friends, 
    -                               network_heterophily, attribute = "PowerOrigin",
    -                                times = 1000)
    +data-lines="0" data-pipe="|>"> +

    Again, we see that there is perhaps nothing so surprising that we got the homophily score for gender that we did, but the lack of power origin heterophily is surprising. Plot the results for gender and power according to the random and permutation baselines.

    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    +data-lines="0" data-pipe="|>">
    (plot(rand.gender) | plot(rand.power)) /
     (plot(perm.gender) | plot(perm.power))
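The 'testpermute' and 'cugqap' solutions repeat the test with permutation (QAP) baselines, which keep the observed structure and shuffle only the node labels, then juxtapose both baselines with {patchwork}. Continuing from the previous sketch:

```r
library(migraph)
library(patchwork)

# QAP test: observed EI vs 1,000 label permutations of the same network
perm.gender <- test_permutation(marvel_friends,
                                network_heterophily, attribute = "Gender",
                                times = 1000)
perm.power <- test_permutation(marvel_friends,
                               network_heterophily, attribute = "PowerOrigin",
                               times = 1000)

# Random-graph baselines (top row) against permutation baselines (bottom row)
(plot(rand.gender) | plot(rand.power)) /
  (plot(perm.gender) | plot(perm.power))
```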
    @@ -475,7 +467,7 @@

    Network linear models

    study.

    +data-lines="0" data-pipe="|>">
    ison_networkers
     autographr(ison_networkers,
                node_color = "Discipline")
    @@ -494,30 +486,31 @@

    doesn’t make sense to include both ego and alter effects because these are undirected).

    +data-diagnostics="1" data-startover="1" data-lines="0" +data-pipe="|>">
    +data-lines="0" data-pipe="|>">
    network_reg(____, ison_networkers, times = 200)
    +data-lines="0" data-pipe="|>">
    weight ~ alter(Citations) + sim(Citations) + 
                           alter(Discipline) + same(Discipline)
    +data-lines="0" data-pipe="|>">
    model1 <- network_reg(weight ~ alter(Citations) + sim(Citations) + 
                           alter(Discipline) + same(Discipline), 
                           ison_networkers, times = 200)
    +data-lines="0" data-pipe="|>">

    We can use tidy methods to get the salient information from this @@ -525,12 +518,12 @@

    results to facilitate the quick interpretation of these results.

    +data-lines="0" data-pipe="|>">
    +data-lines="0" data-pipe="|>">
    tidy(model1)
     glance(model1)
     plot(model1)
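Finally, the 'qapmax' and 'qapinterp' solutions fit a multiple regression QAP on the weighted EIES network: `alter()` terms capture receiver effects, `sim()` similarity in a continuous attribute, and `same()` matches on a categorical one, with significance judged against permutations of the dependent network. Verbatim from the cached solutions, with libraries added:

```r
library(manynet)  # ison_networkers
library(migraph)  # network_reg() and its tidy()/glance()/plot() methods

model1 <- network_reg(weight ~ alter(Citations) + sim(Citations) +
                        alter(Discipline) + same(Discipline),
                      ison_networkers, times = 200)

tidy(model1)    # coefficient estimates
glance(model1)  # model-level summary
plot(model1)    # coefficients against their permutation distributions
```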
    @@ -589,9 +582,9 @@

    "library(manynet)", "library(migraph)", "library(patchwork)", "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = NULL, chunks = list(list(label = "friends", - code = "", opts = list(label = "\"friends\"", exercise = "TRUE"), - engine = "r")), code_check = NULL, error_check = NULL, check = NULL, - solution = structure(c("marvel_friends <- to_unsigned(ison_marvel_relationships, keep = \"positive\")", + code = "", opts = list(label = "\"friends\"", exercise = "TRUE", + purl = "FALSE"), engine = "r")), code_check = NULL, error_check = NULL, + check = NULL, solution = structure(c("marvel_friends <- to_unsigned(ison_marvel_relationships, keep = \"positive\")", "marvel_friends <- to_giant(marvel_friends)", "marvel_friends <- marvel_friends %>% to_subgraph(Appearances >= mean(Appearances))", "marvel_friends"), chunk_opts = list(label = "friends-solution")), tests = NULL, options = list(eval = FALSE, echo = TRUE, results = "markup", @@ -609,9 +602,9 @@

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "friends", exercise = TRUE, - code = "", out.width.px = 624, out.height.px = 384, params.src = "friends, exercise=TRUE", + code = "", out.width.px = 624, out.height.px = 384, params.src = "friends, exercise=TRUE, purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -633,9 +626,9 @@

    chunks = list(list(label = "friends-solution", code = "marvel_friends <- to_unsigned(ison_marvel_relationships, keep = \"positive\")\nmarvel_friends <- to_giant(marvel_friends)\nmarvel_friends <- marvel_friends %>% to_subgraph(Appearances >= mean(Appearances))\nmarvel_friends", opts = list(label = "\"friends-solution\""), engine = "r"), list(label = "plotfriends", code = "", opts = list(label = "\"plotfriends\"", - exercise = "TRUE", exercise.setup = "\"friends-solution\""), - engine = "r")), code_check = NULL, error_check = NULL, - check = NULL, solution = structure(c("autographr(marvel_friends, ", + exercise = "TRUE", exercise.setup = "\"friends-solution\"", + purl = "FALSE"), engine = "r")), code_check = NULL, + error_check = NULL, check = NULL, solution = structure(c("autographr(marvel_friends, ", " node_shape = \"Gender\",", " node_color = \"PowerOrigin\")" ), chunk_opts = list(label = "plotfriends-solution")), tests = NULL, options = list(eval = FALSE, echo = TRUE, results = "markup", @@ -653,10 +646,10 @@

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "plotfriends", exercise = TRUE, exercise.setup = "friends-solution", code = "", out.width.px = 624, - out.height.px = 384, params.src = "plotfriends, exercise=TRUE, exercise.setup = \"friends-solution\"", + out.height.px = 384, params.src = "plotfriends, exercise=TRUE, exercise.setup = \"friends-solution\", purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -678,9 +671,9 @@

    chunks = list(list(label = "friends-solution", code = "marvel_friends <- to_unsigned(ison_marvel_relationships, keep = \"positive\")\nmarvel_friends <- to_giant(marvel_friends)\nmarvel_friends <- marvel_friends %>% to_subgraph(Appearances >= mean(Appearances))\nmarvel_friends", opts = list(label = "\"friends-solution\""), engine = "r"), list(label = "blau", code = "", opts = list(label = "\"blau\"", - exercise = "TRUE", exercise.setup = "\"friends-solution\""), - engine = "r")), code_check = NULL, error_check = NULL, - check = NULL, solution = structure(c("network_diversity(marvel_friends, \"Gender\")", + exercise = "TRUE", exercise.setup = "\"friends-solution\"", + purl = "FALSE"), engine = "r")), code_check = NULL, + error_check = NULL, check = NULL, solution = structure(c("network_diversity(marvel_friends, \"Gender\")", "network_diversity(marvel_friends, \"PowerOrigin\")", "network_diversity(marvel_friends, \"Attractive\")", "network_diversity(marvel_friends, \"Rich\")", "network_diversity(marvel_friends, \"Intellect\")" ), chunk_opts = list(label = "blau-solution")), tests = NULL, @@ -699,9 +692,9 @@

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "blau", exercise = TRUE, exercise.setup = "friends-solution", - code = "", out.width.px = 624, out.height.px = 384, params.src = "blau, exercise=TRUE, exercise.setup = \"friends-solution\"", + code = "", out.width.px = 624, out.height.px = 384, params.src = "blau, exercise=TRUE, exercise.setup = \"friends-solution\", purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -723,9 +716,9 @@

    chunks = list(list(label = "friends-solution", code = "marvel_friends <- to_unsigned(ison_marvel_relationships, keep = \"positive\")\nmarvel_friends <- to_giant(marvel_friends)\nmarvel_friends <- marvel_friends %>% to_subgraph(Appearances >= mean(Appearances))\nmarvel_friends", opts = list(label = "\"friends-solution\""), engine = "r"), list(label = "crossref", code = "", opts = list(label = "\"crossref\"", - exercise = "TRUE", exercise.setup = "\"friends-solution\""), - engine = "r")), code_check = NULL, error_check = NULL, - check = NULL, solution = structure(c("network_diversity(marvel_friends, \"Gender\", \"PowerOrigin\")", + exercise = "TRUE", exercise.setup = "\"friends-solution\"", + purl = "FALSE"), engine = "r")), code_check = NULL, + error_check = NULL, check = NULL, solution = structure(c("network_diversity(marvel_friends, \"Gender\", \"PowerOrigin\")", "network_diversity(marvel_friends, \"Intellect\", \"Gender\")" ), chunk_opts = list(label = "crossref-solution")), tests = NULL, options = list(eval = FALSE, echo = TRUE, results = "markup", @@ -743,10 +736,10 @@

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "crossref", exercise = TRUE, exercise.setup = "friends-solution", code = "", out.width.px = 624, - out.height.px = 384, params.src = "crossref, exercise=TRUE, exercise.setup = \"friends-solution\"", + out.height.px = 384, params.src = "crossref, exercise=TRUE, exercise.setup = \"friends-solution\", purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -768,9 +761,9 @@

    chunks = list(list(label = "friends-solution", code = "marvel_friends <- to_unsigned(ison_marvel_relationships, keep = \"positive\")\nmarvel_friends <- to_giant(marvel_friends)\nmarvel_friends <- marvel_friends %>% to_subgraph(Appearances >= mean(Appearances))\nmarvel_friends", opts = list(label = "\"friends-solution\""), engine = "r"), list(label = "ei", code = "", opts = list(label = "\"ei\"", - exercise = "TRUE", exercise.setup = "\"friends-solution\""), - engine = "r")), code_check = NULL, error_check = NULL, - check = NULL, solution = structure(c("(obs.gender <- network_heterophily(marvel_friends, \"Gender\"))", + exercise = "TRUE", exercise.setup = "\"friends-solution\"", + purl = "FALSE"), engine = "r")), code_check = NULL, + error_check = NULL, check = NULL, solution = structure(c("(obs.gender <- network_heterophily(marvel_friends, \"Gender\"))", "(obs.powers <- network_heterophily(marvel_friends, \"PowerOrigin\")) ", "(obs.attract <- network_heterophily(marvel_friends, \"Attractive\")) " ), chunk_opts = list(label = "ei-solution")), tests = NULL, @@ -789,9 +782,9 @@

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "ei", exercise = TRUE, exercise.setup = "friends-solution", - code = "", out.width.px = 624, out.height.px = 384, params.src = "ei, exercise=TRUE, exercise.setup = \"friends-solution\"", + code = "", out.width.px = 624, out.height.px = 384, params.src = "ei, exercise=TRUE, exercise.setup = \"friends-solution\", purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -800,27 +793,27 @@

@@ -840,14 +833,14 @@
    chunks = list(list(label = "friends-solution", code = "marvel_friends <- to_unsigned(ison_marvel_relationships, keep = \"positive\")\nmarvel_friends <- to_giant(marvel_friends)\nmarvel_friends <- marvel_friends %>% to_subgraph(Appearances >= mean(Appearances))\nmarvel_friends", opts = list(label = "\"friends-solution\""), engine = "r"), list(label = "rando", code = "", opts = list(label = "\"rando\"", - exercise = "TRUE", exercise.setup = "\"friends-solution\""), - engine = "r")), code_check = NULL, error_check = NULL, - check = NULL, solution = structure(c("rand.gender <- test_random(marvel_friends, ", - " network_homophily, attribute = \"Gender\", ", + exercise = "TRUE", exercise.setup = "\"friends-solution\"", + purl = "FALSE"), engine = "r")), code_check = NULL, + error_check = NULL, check = NULL, solution = structure(c("rand.gender <- test_random(marvel_friends, ", + " network_heterophily, attribute = \"Gender\", ", " times = 1000)", "rand.power <- test_random(marvel_friends, ", - " network_homophily, attribute = \"PowerOrigin\", ", + " network_heterophily, attribute = \"PowerOrigin\", ", " times = 1000)", "rand.attract <- test_random(marvel_friends, ", - " network_homophily, attribute = \"Attractive\", ", + " network_heterophily, attribute = \"Attractive\", ", " times = 1000)", "plot(rand.gender)", "plot(rand.power)", "plot(rand.attract)"), chunk_opts = list( label = "rando-solution")), tests = NULL, options = list( @@ -865,9 +858,9 @@

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "rando", exercise = TRUE, exercise.setup = "friends-solution", - code = "", out.width.px = 624, out.height.px = 384, params.src = "rando, exercise=TRUE, exercise.setup = \"friends-solution\"", + code = "", out.width.px = 624, out.height.px = 384, params.src = "rando, exercise=TRUE, exercise.setup = \"friends-solution\", purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -889,9 +882,9 @@

    chunks = list(list(label = "friends-solution", code = "marvel_friends <- to_unsigned(ison_marvel_relationships, keep = \"positive\")\nmarvel_friends <- to_giant(marvel_friends)\nmarvel_friends <- marvel_friends %>% to_subgraph(Appearances >= mean(Appearances))\nmarvel_friends", opts = list(label = "\"friends-solution\""), engine = "r"), list(label = "perm", code = "", opts = list(label = "\"perm\"", - exercise = "TRUE", exercise.setup = "\"friends-solution\""), - engine = "r")), code_check = NULL, error_check = NULL, - check = NULL, solution = structure(c("old <- autographr(marvel_friends, ", + exercise = "TRUE", exercise.setup = "\"friends-solution\"", + purl = "FALSE"), engine = "r")), code_check = NULL, + error_check = NULL, check = NULL, solution = structure(c("old <- autographr(marvel_friends, ", " labels = FALSE, node_size = 6, ", " node_color = \"PowerOrigin\", ", " node_shape = \"Gender\")", "new <- autographr(generate_permutation(marvel_friends, with_attr = TRUE),", " labels = FALSE, node_size = 6,", " node_color = \"PowerOrigin\",", @@ -912,9 +905,9 @@

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "perm", exercise = TRUE, exercise.setup = "friends-solution", - code = "", out.width.px = 624, out.height.px = 384, params.src = "perm, exercise=TRUE, exercise.setup = \"friends-solution\"", + code = "", out.width.px = 624, out.height.px = 384, params.src = "perm, exercise=TRUE, exercise.setup = \"friends-solution\", purl = FALSE", fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -932,13 +925,13 @@

    learnr:::store_exercise_cache(structure(list(label = "testpermute", global_setup = structure(c("library(learnr)", "library(manynet)", "library(migraph)", "library(patchwork)", "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", - include = FALSE)), setup = "marvel_friends <- to_unsigned(ison_marvel_relationships, keep = \"positive\")\nmarvel_friends <- to_giant(marvel_friends)\nmarvel_friends <- marvel_friends %>% to_subgraph(Appearances >= mean(Appearances))\nmarvel_friends", - chunks = list(list(label = "friends-solution", code = "marvel_friends <- to_unsigned(ison_marvel_relationships, keep = \"positive\")\nmarvel_friends <- to_giant(marvel_friends)\nmarvel_friends <- marvel_friends %>% to_subgraph(Appearances >= mean(Appearances))\nmarvel_friends", - opts = list(label = "\"friends-solution\""), engine = "r"), + include = FALSE)), setup = "rand.gender <- test_random(marvel_friends, \n network_heterophily, attribute = \"Gender\", \n times = 1000)\nrand.power <- test_random(marvel_friends, \n network_heterophily, attribute = \"PowerOrigin\", \n times = 1000)\nrand.attract <- test_random(marvel_friends, \n network_heterophily, attribute = \"Attractive\", \n times = 1000)\nplot(rand.gender)\nplot(rand.power)\nplot(rand.attract)", + chunks = list(list(label = "rando-solution", code = "rand.gender <- test_random(marvel_friends, \n network_heterophily, attribute = \"Gender\", \n times = 1000)\nrand.power <- test_random(marvel_friends, \n network_heterophily, attribute = \"PowerOrigin\", \n times = 1000)\nrand.attract <- test_random(marvel_friends, \n network_heterophily, attribute = \"Attractive\", \n times = 1000)\nplot(rand.gender)\nplot(rand.power)\nplot(rand.attract)", + opts = list(label = "\"rando-solution\""), engine = "r"), list(label = "testpermute", code = "", opts = list(label = "\"testpermute\"", - exercise = "TRUE", exercise.setup = "\"friends-solution\""), - engine = "r")), code_check = NULL, error_check = NULL, - check = NULL, solution = structure(c("perm.gender <- test_permutation(marvel_friends, ", + exercise = "TRUE", exercise.setup = "\"rando-solution\"", + purl = "FALSE"), engine = "r")), code_check = NULL, + error_check = NULL, check = NULL, solution = structure(c("perm.gender <- test_permutation(marvel_friends, ", " network_heterophily, attribute = \"Gender\",", " times = 1000)", "perm.power <- test_permutation(marvel_friends, ", " network_heterophily, attribute = \"PowerOrigin\",", @@ -958,10 +951,10 @@

    Network linear models

     fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL,
-    engine = "r", split = FALSE, include = TRUE, purl = TRUE,
+    engine = "r", split = FALSE, include = TRUE, purl = FALSE,
     max.print = 1000, label = "testpermute", exercise = TRUE,
-    exercise.setup = "friends-solution", code = "", out.width.px = 624,
-    out.height.px = 384, params.src = "testpermute, exercise=TRUE, exercise.setup = \"friends-solution\"",
+    exercise.setup = "rando-solution", code = "", out.width.px = 624,
+    out.height.px = 384, params.src = "testpermute, exercise=TRUE, exercise.setup = \"rando-solution\", purl = FALSE",
     fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise"
     )))
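Beyond the `purl` flag, this exercise's setup now chains off `rando-solution` instead of `friends-solution`, so the random-baseline objects already exist when learners attempt the permutation test. Condensed here to the `Gender` attribute only (the cache also covers `PowerOrigin` and `Attractive`), the stored setup-then-solution sequence amounts to:

```r
# Setup inherited from the "rando-solution" chunk:
rand.gender <- test_random(marvel_friends,
                           network_heterophily, attribute = "Gender",
                           times = 1000)
# Model solution for the "testpermute" exercise:
perm.gender <- test_permutation(marvel_friends,
                                network_heterophily, attribute = "Gender",
                                times = 1000)
```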
@@ -979,13 +972,13 @@

Network linear models

    learnr:::store_exercise_cache(structure(list(label = "cugqap", global_setup = structure(c("library(learnr)", "library(manynet)", "library(migraph)", "library(patchwork)", "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", - include = FALSE)), setup = "marvel_friends <- to_unsigned(ison_marvel_relationships, keep = \"positive\")\nmarvel_friends <- to_giant(marvel_friends)\nmarvel_friends <- marvel_friends %>% to_subgraph(Appearances >= mean(Appearances))\nmarvel_friends", - chunks = list(list(label = "friends-solution", code = "marvel_friends <- to_unsigned(ison_marvel_relationships, keep = \"positive\")\nmarvel_friends <- to_giant(marvel_friends)\nmarvel_friends <- marvel_friends %>% to_subgraph(Appearances >= mean(Appearances))\nmarvel_friends", - opts = list(label = "\"friends-solution\""), engine = "r"), + include = FALSE)), setup = "perm.gender <- test_permutation(marvel_friends, \n network_heterophily, attribute = \"Gender\",\n times = 1000)\nperm.power <- test_permutation(marvel_friends, \n network_heterophily, attribute = \"PowerOrigin\",\n times = 1000)", + chunks = list(list(label = "testpermute-solution", code = "perm.gender <- test_permutation(marvel_friends, \n network_heterophily, attribute = \"Gender\",\n times = 1000)\nperm.power <- test_permutation(marvel_friends, \n network_heterophily, attribute = \"PowerOrigin\",\n times = 1000)", + opts = list(label = "\"testpermute-solution\""), engine = "r"), list(label = "cugqap", code = "", opts = list(label = "\"cugqap\"", - exercise = "TRUE", exercise.setup = "\"friends-solution\""), - engine = "r")), code_check = NULL, error_check = NULL, - check = NULL, solution = structure(c("(plot(rand.gender) | plot(rand.power)) /", + exercise = "TRUE", exercise.setup = "\"testpermute-solution\"", + purl = "FALSE"), engine = "r")), code_check = NULL, + error_check = NULL, check = NULL, solution = structure(c("(plot(rand.gender) | plot(rand.power)) /", "(plot(perm.gender) | plot(perm.power))"), chunk_opts = list( label = "cugqap-solution")), tests = NULL, options = list( eval = FALSE, echo = TRUE, results = "markup", tidy = FALSE, @@ -1002,10 +995,10 @@

    Network linear models

     fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL,
-    engine = "r", split = FALSE, include = TRUE, purl = TRUE,
+    engine = "r", split = FALSE, include = TRUE, purl = FALSE,
     max.print = 1000, label = "cugqap", exercise = TRUE,
-    exercise.setup = "friends-solution", code = "", out.width.px = 624,
-    out.height.px = 384, params.src = "cugqap, exercise=TRUE, exercise.setup = \"friends-solution\"",
+    exercise.setup = "testpermute-solution", code = "", out.width.px = 624,
+    out.height.px = 384, params.src = "cugqap, exercise=TRUE, exercise.setup = \"testpermute-solution\", purl = FALSE",
     fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise"
     )))
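The `cugqap` solution cached above composes the four baseline plots with `{patchwork}`, which is loaded in the tutorial's global setup: `|` places plots side by side and `/` stacks them. Assuming the `rand.*` objects from the earlier `rando-solution` chunk are in scope alongside the `perm.*` objects from the new setup chain, the comparison reads:

```r
library(patchwork)
# Random-graph (CUG) baselines on top,
# permutation (QAP) baselines below:
(plot(rand.gender) | plot(rand.power)) /
  (plot(perm.gender) | plot(perm.power))
```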
@@ -1025,7 +1018,7 @@

Network linear models

    "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = NULL, chunks = list(list(label = "introeies", code = "ison_networkers\nautographr(ison_networkers,\n node_color = \"Discipline\")", - opts = list(label = "\"introeies\"", exercise = "TRUE"), + opts = list(label = "\"introeies\"", exercise = "TRUE", purl = "FALSE"), engine = "r")), code_check = NULL, error_check = NULL, check = NULL, solution = NULL, tests = NULL, options = list(eval = FALSE, echo = TRUE, results = "markup", tidy = FALSE, tidy.opts = NULL, @@ -1042,11 +1035,11 @@

    Network linear models

    fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL, - engine = "r", split = FALSE, include = TRUE, purl = TRUE, + engine = "r", split = FALSE, include = TRUE, purl = FALSE, max.print = 1000, label = "introeies", exercise = TRUE, code = c("ison_networkers", "autographr(ison_networkers,", " node_color = \"Discipline\")"), out.width.px = 624, - out.height.px = 384, params.src = "introeies, exercise=TRUE", + out.height.px = 384, params.src = "introeies, exercise=TRUE, purl = FALSE", fig.num = 0, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise" ))) @@ -1066,8 +1059,8 @@
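Unlike the empty exercise scaffolds above, `introeies` carries pre-filled starter code (the `code` field), so `purl = FALSE` here keeps a chunk out of the extracted script even though it is not blank. The starter code itself, exactly as cached:

```r
# Inspect the EIES dataset and colour nodes by discipline:
ison_networkers
autographr(ison_networkers,
           node_color = "Discipline")
```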
@@ -1066,8 +1059,8 @@

Network linear models

    "knitr::opts_chunk$set(echo = FALSE)"), chunk_opts = list(label = "setup", include = FALSE)), setup = NULL, chunks = list(list(label = "qapmax", code = "", opts = list(label = "\"qapmax\"", exercise = "TRUE", - exercise.timelimit = "240"), engine = "r")), code_check = NULL, - error_check = NULL, check = NULL, solution = structure(c("model1 <- network_reg(weight ~ alter(Citations) + sim(Citations) + ", + exercise.timelimit = "240", purl = "FALSE"), engine = "r")), + code_check = NULL, error_check = NULL, check = NULL, solution = structure(c("model1 <- network_reg(weight ~ alter(Citations) + sim(Citations) + ", " alter(Discipline) + same(Discipline), ", " ison_networkers, times = 200)"), chunk_opts = list( label = "qapmax-solution")), tests = NULL, options = list( @@ -1085,10 +1078,10 @@

    Network linear models

     fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL,
-    engine = "r", split = FALSE, include = TRUE, purl = TRUE,
+    engine = "r", split = FALSE, include = TRUE, purl = FALSE,
     max.print = 1000, label = "qapmax", exercise = TRUE,
     exercise.timelimit = 240, code = "", out.width.px = 624,
-    out.height.px = 384, params.src = "qapmax, exercise=TRUE, exercise.timelimit = 240",
+    out.height.px = 384, params.src = "qapmax, exercise=TRUE, exercise.timelimit = 240, purl = FALSE",
     fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise"
     )))
@@ -1110,10 +1103,10 @@

    Network linear models

    chunks = list(list(label = "qapmax-solution", code = "model1 <- network_reg(weight ~ alter(Citations) + sim(Citations) + \n alter(Discipline) + same(Discipline), \n ison_networkers, times = 200)", opts = list(label = "\"qapmax-solution\""), engine = "r"), list(label = "qapinterp", code = "", opts = list(label = "\"qapinterp\"", - exercise = "TRUE", exercise.setup = "\"qapmax-solution\""), - engine = "r")), code_check = NULL, error_check = NULL, - check = NULL, solution = structure(c("tidy(model1)", "glance(model1)", - "plot(model1)"), chunk_opts = list(label = "qapinterp-solution")), + exercise = "TRUE", exercise.setup = "\"qapmax-solution\"", + purl = "FALSE"), engine = "r")), code_check = NULL, + error_check = NULL, check = NULL, solution = structure(c("tidy(model1)", + "glance(model1)", "plot(model1)"), chunk_opts = list(label = "qapinterp-solution")), tests = NULL, options = list(eval = FALSE, echo = TRUE, results = "markup", tidy = FALSE, tidy.opts = NULL, collapse = FALSE, prompt = FALSE, comment = NA, highlight = FALSE, size = "normalsize", @@ -1129,10 +1122,10 @@

    Network linear models

     fig.retina = 2, external = TRUE, sanitize = FALSE, interval = 1, aniopts = "controls,loop", warning = TRUE, error = FALSE, message = TRUE, render = NULL, ref.label = NULL, child = NULL,
-    engine = "r", split = FALSE, include = TRUE, purl = TRUE,
+    engine = "r", split = FALSE, include = TRUE, purl = FALSE,
     max.print = 1000, label = "qapinterp", exercise = TRUE,
     exercise.setup = "qapmax-solution", code = "", out.width.px = 624,
-    out.height.px = 384, params.src = "qapinterp, exercise=TRUE, exercise.setup = \"qapmax-solution\"",
+    out.height.px = 384, params.src = "qapinterp, exercise=TRUE, exercise.setup = \"qapmax-solution\", purl = FALSE",
     fig.num = 0L, exercise.df_print = "paged", exercise.checker = "NULL"), engine = "r", version = "4"), class = c("r", "tutorial_exercise"
     )))
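The last two caches chain the QAP regression into its interpretation step: `qapinterp` sets up from `qapmax-solution`, so `model1` exists before learners probe it. Run in sequence, the stored solutions are as follows (the comments are added for orientation and are not part of the cache):

```r
# qapmax-solution: fit a network linear model with 200 QAP permutations
model1 <- network_reg(weight ~ alter(Citations) + sim(Citations) +
                        alter(Discipline) + same(Discipline),
                      ison_networkers, times = 200)
# qapinterp-solution: inspect the fitted model
tidy(model1)    # coefficient table
glance(model1)  # model-level summary
plot(model1)    # plot the model results
```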
@@ -1141,31 +1134,31 @@

Network linear models

diff --git a/man/migraph-package.Rd b/man/migraph-package.Rd
index a21d2504..a1200823 100644
--- a/man/migraph-package.Rd
+++ b/man/migraph-package.Rd
@@ -13,7 +13,7 @@ A set of tools for analysing multimodal networks. It includes functions for meas
 \seealso{
 Useful links:
 \itemize{
-  \item \url{https://github.com/snlab-ch/migraph}
+  \item \url{https://snlab-ch.github.io/migraph/}
   \item Report bugs at \url{https://github.com/snlab-ch/migraph/issues}
 }