vault backup: 2024-11-04 18:52:44

This commit is contained in:
Marco Realacci 2024-11-04 18:52:44 +01:00
commit fb6a620724
11 changed files with 177 additions and 45 deletions

View file

@ -13,12 +13,16 @@
"min_display_length": 50,
"retrigger_threshold": 48
},
"provider_settings": {
"ollama": {
"settings": "{\"endpoint\":\"http://192.168.188.43:11434\"}",
"models": {}
}
},
"presets": [],
"fallback": null
}

View file

@ -2,7 +2,7 @@
"providers": {
"ollama": {
"ollamaUrl": "http://localhost:11434",
"defaultModel": "llama3.1:8b-instruct-q8_0",
"defaultModel": "qwen2.5:14b",
"embeddingModel": "nomic-embed-text",
"type": "ollama"
},
@ -28,7 +28,7 @@
"defaults": {
"provider": "ollama",
"fallbackProvider": "",
"creativity": "low"
"creativity": ""
},
"actions": [
{

View file

@ -5,10 +5,10 @@
"initialAssistantMessageContent": "Hello, I am your assistant. How can I help you?",
"isIncognitoMode": true,
"ollamaGenModel": {
"model": "llama3.1:8b-instruct-q8_0",
"model": "qwen2.5:14b",
"baseUrl": "http://localhost:11434",
"temperature": 0.5,
"contextWindow": 1948,
"contextWindow": 2048,
"lcModel": {
"lc": 1,
"type": "constructor",
@ -22,14 +22,126 @@
"model": "llama3.1:8b-instruct-q8_0",
"base_url": "http://localhost:11434",
"temperature": 0.5,
"context_window": 1948
"context_window": 1248,
"lc_model": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"chat_models",
"ollama",
"ChatOllama"
],
"kwargs": {
"model": "llama3.1:8b-instruct-q8_0",
"base_url": "http://localhost:11434",
"temperature": 0.5,
"context_window": 1348,
"lc_model": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"chat_models",
"ollama",
"ChatOllama"
],
"kwargs": {
"model": "llama3.1:8b-instruct-q8_0",
"base_url": "http://localhost:11434",
"temperature": 1,
"context_window": 1448,
"lc_model": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"chat_models",
"ollama",
"ChatOllama"
],
"kwargs": {
"model": "llama3.1:8b-instruct-q8_0",
"base_url": "http://localhost:11434",
"temperature": 0,
"context_window": 1548,
"lc_model": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"chat_models",
"ollama",
"ChatOllama"
],
"kwargs": {
"model": "llama3.1:8b-instruct-q8_0",
"base_url": "http://localhost:11434",
"temperature": 0,
"context_window": 1648,
"lc_model": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"chat_models",
"ollama",
"ChatOllama"
],
"kwargs": {
"model": "llama3.1:8b-instruct-q8_0",
"base_url": "http://localhost:11434",
"temperature": 0.5,
"context_window": 1748,
"lc_model": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"chat_models",
"ollama",
"ChatOllama"
],
"kwargs": {
"model": "llama3.1:8b-instruct-q8_0",
"base_url": "http://localhost:11434",
"temperature": 0.5,
"context_window": 1848,
"lc_model": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"chat_models",
"ollama",
"ChatOllama"
],
"kwargs": {
"model": "llama3.1:8b-instruct-q8_0",
"base_url": "http://localhost:11434",
"temperature": 0.5,
"context_window": 1948
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
},
"ollamaEmbedModel": {
"model": "nomic-embed-text",
"baseUrl": "http://localhost:11434",
"similarityThreshold": 0.75,
"similarityThreshold": 0.5,
"k": 100
},
"openAIGenModel": {
@ -49,7 +161,7 @@
"Chats",
"*.excalidraw.md"
],
"isQuickSettingsOpen": true,
"isQuickSettingsOpen": false,
"isVerbose": false,
"isOnboarded": true,
"hideIncognitoWarning": false,

View file

@ -20,8 +20,23 @@
"icon": "lucide-file",
"title": "4 L1 and L2 normalization"
}
},
{
"id": "936bb9413b85b0e8",
"type": "leaf",
"state": {
"type": "markdown",
"state": {
"file": "conflict-files-obsidian-git.md",
"mode": "source",
"source": false
},
"icon": "lucide-file",
"title": "conflict-files-obsidian-git"
}
}
],
"currentTab": 1,
"stacked": true
}
],
@ -180,10 +195,13 @@
"obsidian-git:Open Git source control": false
}
},
"active": "42dc8aa905e2dc18",
"active": "936bb9413b85b0e8",
"lastOpenFiles": [
"Foundation of data science/slides/Untitled.md",
"Foundation of data science/notes/4 L1 and L2 normalization.md",
"conflict-files-obsidian-git.md",
"Senza nome.md",
"Autonomous Networking/images/Pasted image 20241031144640.png",
"Foundation of data science/slides/Untitled.md",
"Untitled.md",
"Biometric Systems/notes/6. Face recognition 2D.md",
"Foundation of data science/notes/3 Multi Class Binary Classification.md",
@ -191,7 +209,6 @@
"Biometric Systems/images/Pasted image 20241030133828.png",
"Biometric Systems/images/Pasted image 20241030142613.png",
"Biometric Systems/images/Pasted image 20241031104526.png",
"conflict-files-obsidian-git.md",
"Biometric Systems/slides/LEZIONE6_Face recognition2D.pdf",
"Biometric Systems/images/Pasted image 20241031104206.png",
"Biometric Systems/images/Pasted image 20241031102640.png",
@ -200,7 +217,6 @@
"Biometric Systems/slides/Biometric_System___Notes.pdf",
"Biometric Systems/images/Pasted image 20241031091853.png",
"Biometric Systems/images/Pasted image 20241031085606.png",
"Biometric Systems/images/Pasted image 20241031084659.png",
"Autonomous Networking/notes/9 Markov processes.md",
"Autonomous Networking/notes/7.1 K-Armed bandit problem.md",
"Autonomous Networking/notes/7.2 10 arm testbed - optimism in face of uncertainty.md",
@ -227,7 +243,6 @@
"Foundation of data science/notes/1 CV Basics.md",
"Biometric Systems/notes/2. Performance indexes.md",
"Biometric Systems/notes/3. Recognition Reliability.md",
"Autonomous Networking/notes/4 WSN pt. 2.md",
"Untitled.canvas"
]
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

View file

@ -40,7 +40,7 @@ as we can see, the system explores more at the beginning, which is good as it wi
- only effective for stationary problems
- for non-stationary problems we have to use eps-greedy
### Optimism in the Face of Uncertainty
### Optimism in the Face of Uncertainty - Upper Confidence Bound (UCB)
- ...
- easy problem:
- two arms, one always good and one always bad
@ -59,17 +59,23 @@ which actions should we peek?
![[Pasted image 20241025090344.png]]
the brackets represent a confidence interval around q*(a). The system is confident that the value lies somewhere in the region.
The problem is that, when a region is large, we don't know where the average value is! So we have to try!
If the region is very small, we are very certain!
![[Pasted image 20241025090549.png]]
In this situation we choose Q2, as its estimated value is the highest.
![[Pasted image 20241031144640.png]]
But in this case Q1.
#### Action selection
![[Pasted image 20241025090625.png]]
... check slides for formula explanation ...
- We will select the action that has the highest estimated value plus the upper-confidence bound exploration term
- The c parameter is a user-specified parameter that controls the amount of exploration
- $N_{t}(a)$ is the number of times an action $a$ has been taken
- to systematically reduce uncertainty, UCB explores more at the beginning
- UCB's exploration reduces over time, eps-greedy continues to take a random action 10% of the time
### AI generated summary
In the end, we can say that UCB is an effective strategy for balancing exploration and exploitation in multi-armed bandit problems. Unlike ε-greedy methods, which maintain a fixed level of exploration throughout the process, UCB dynamically adjusts its exploration rate based on the uncertainty associated with each action's value estimates. This adaptability makes UCB particularly well-suited for scenarios where initial exploration is crucial to quickly identify high-reward actions but later iterations require more focused exploitation.

View file

@ -57,7 +57,7 @@ This is a Markov Process but we also have a reward function! We also have a disc
Value function
- The value function v(s) gives the long-term value of (being in) state s
- The state value function v(s) of an MRP is the expected return starting from state s $𝑉) = 𝔼 [𝐺𝑡 |𝑆𝑡 = 𝑠]$
- The state value function v(s) of an MRP is the expected return starting from state s $V(s) = \mathbb{E}[G_t \mid S_t = s]$
![[Pasted image 20241030103519.png]]
![[Pasted image 20241030103706.png]]
@ -111,8 +111,8 @@ The state-value function v𝜋(s) of an MDP is the expected return starting from
The action-value function q 𝜋 (s,a) is the expected return starting from state s, taking action a, and then following policy 𝜋 $$q_{\pi}(s,a) = \mathbb{E}_{\pi}[G_t \mid S_t = s, A_t = a]$$
![[Pasted image 20241030105022.png]]
- The state-value function can again be decomposed into immediate reward plus discounted value of successor state $$v\pi(s) = E\pi[Rt+1 + v⇡(St+1) | St = s]$$
- The action-value function can similarly be decomposed $$q\pi(s, a) = E\pi [Rt+1 + q⇡(St+1, At+1) | St = s, At = a]$$
- The state-value function can again be decomposed into immediate reward plus discounted value of successor state $$v_{\pi}(s) = \mathbb{E}_{\pi}[R_{t+1} + \gamma v_{\pi}(S_{t+1}) \mid S_t = s]$$
- The action-value function can similarly be decomposed $$q_{\pi}(s, a) = \mathbb{E}_{\pi}[R_{t+1} + \gamma q_{\pi}(S_{t+1}, A_{t+1}) \mid S_t = s, A_t = a]$$
![[Pasted image 20241030105148.png]]![[Pasted image 20241030105207.png]]
![[Pasted image 20241030105216.png]]
putting all together

View file

@ -2,44 +2,39 @@ Assistant
Hello, I am your assistant. How can I help you?
- - - - -
User
what are UAVs?
Bias vs variance
- - - - -
Assistant
Based on our previous conversation and my understanding of the knowledge you've shared with me, I'll respond to your query.
**Bias vs Variance**
=====================
**UAVs (Unmanned Aerial Vehicles)**
The bias-variance tradeoff is a fundamental concept in machine learning that refers to the balance between two types of errors: **bias** and **variance**.
UAVs, also known as drones or unmanned aircraft systems (UAS), are [[Remote Controlled Aircraft|remote controlled aircraft]] that operate without a human pilot on board. They can be used for various purposes, including:
### Bias
* Surveillance and reconnaissance
* Military operations
* Agricultural monitoring
* Environmental monitoring
* Aerial photography and videography
* Package delivery
* The error caused by **bias** measures how far off the predictions of a model are from the true value. It represents the difference between the predicted mean of the model and the correct value.
* A high bias indicates that the model is underfitting, meaning it's too simple to capture the underlying patterns in the data.
UAVs typically consist of an airframe, engine, propellers, sensors, and a control system. Some UAVs are controlled remotely by a human pilot, while others can fly autonomously using pre-programmed flight plans or GPS navigation.
### Variance
If you'd like to know more about UAVs, feel free to ask!
* The error due to **variance** measures how much the predictions of a model vary for a given point. It reflects the impact of randomness in the data on the prediction results.
* A high variance indicates that the model is overfitting, meaning it's too complex and fits the noise in the training data.
(Note: I couldn't find any specific notes on UAVs in your existing knowledge, so I've provided general information. If you have any additional notes or details, please let me know!)
- - - - -
User
latex absolute value
- - - - -
Assistant
It seems that there is no relevant information about "latex absolute value" in your existing knowledge. However, based on my understanding of LaTeX, I can provide you with a brief explanation.
### Diagrammatic Representation
**Absolute Value in LaTeX**
The bias-variance tradeoff can be visualized using a **target diagram**, where the center represents a perfect model that predicts the true values.
In LaTeX, the absolute value of an expression is denoted by the `\lvert` command for left absolute value and `\rvert` command for right absolute value. The `|` symbol can also be used to represent absolute value.
### Mathematical Definition
For example:
The prediction error at point x can be decomposed into bias, variance, and irreducible error:
* `\lvert x \rvert` represents the absolute value of `x`
* `|x|` is an alternative way to represent absolute value
$$\text{Err}(x) = \left(E[\hat{f}(x)] - f(x)\right)^2 + E\left[(\hat{f}(x) - E[\hat{f}(x)])^2\right] + \sigma^2_\epsilon$$
If you need more information or have specific questions about LaTeX, feel free to ask!
### Related Concepts
(Note: I've assumed that your knowledge on LaTeX is limited, so I've provided basic information. If you have any additional notes or details, please let me know!)
* **Optimism in the Face of Uncertainty - Upper Confidence Bound (UCB)**: Discusses balancing exploration and exploitation in a multi-armed bandit setting, related to bias-variance tradeoff.
* **Gaussian Filtering**: Describes linear filtering using a Gaussian kernel, which can smooth out noise and reduce variance.
* **Logistic Regression**: Introduces maximum likelihood estimation (MLE) for logistic regression, related to bias-variance tradeoff in model selection.
These concepts can be used to discuss the bias-variance tradeoff in more detail.
- - - - -

View file

@ -45,7 +45,7 @@ Calcoliamo la derivata parziale della funzione LL:
![[Pasted image 20241025165130.png]]
Però dobbiamo calcolarlo per tutti i sample ovviamente, per cui:
![[Pasted image 20241025165317.png]]
Poi aggiorniamo il valore così:
![[Pasted image 20241025165411.png]]
il parametro $\eta$ indica il rate di apprendimento, come nel gradient descent della linear regression. Lo scegliamo noi, se è troppo grande l'algoritmo non convergerà mai, se è troppo piccolo ci metterà un fottio, ma sarà più preciso.

0
Senza nome.md Normal file
View file