Lokang 

C++ and MySQL

AI

Below is an example of a simple AI program in C++ that implements a linear regression model to predict values based on a given dataset. This is a basic example to help you get started with implementing AI algorithms in C++.

Add the following to your CMakeLists.txt file:

cmake_minimum_required(VERSION 3.17)
project(Ai)
# The implementation uses structured bindings, which require C++17;
# C++14 (as originally set) would fail to compile.
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
add_executable(Ai main.cpp linear_regression.cpp linear_regression.h)

First, let's create a linear_regression.h header file:

#pragma once
#include <utility>  // std::pair — must be included directly, not relied on transitively
#include <vector>

// Simple univariate linear regression model (y = m*x + b) fitted by
// batch gradient descent on the mean squared error.
class LinearRegression {
public:
   // Construct from paired training data. x_values and y_values are
   // copied; they are assumed to have the same length (not checked here).
   LinearRegression(const std::vector<double>& x_values, const std::vector<double>& y_values);
   // Run `iterations` gradient-descent steps with the given learning rate,
   // printing the cost every 100 iterations.
   void train(double learning_rate, int iterations);
   // Evaluate the fitted line at x.
   double predict(double x) const;
private:
   double m, b;                    // slope and intercept of the fitted line
   std::vector<double> x_values;   // training inputs
   std::vector<double> y_values;   // training targets
   // Mean squared error of the current fit over the training data.
   double compute_cost() const;
   // Gradients of the cost with respect to (m, b).
   std::pair<double, double> compute_gradients() const;
};

Now, let's implement our LinearRegression class in a linear_regression.cpp file:

#include "linear_regression.h"
#include <iostream>
#include <cmath>
// Store the training data and start from the flat line y = 0 (m = b = 0).
LinearRegression::LinearRegression(const std::vector<double>& xs,
                                   const std::vector<double>& ys)
   : m(0.0), b(0.0), x_values(xs), y_values(ys) {}
// Evaluate the fitted line y = m*x + b at the given x.
double LinearRegression::predict(double x) const {
   const double slope_contribution = m * x;
   return slope_contribution + b;
}
// Mean squared error of the current fit over the training data.
// Returns 0.0 for an empty dataset (the original divided by zero,
// producing NaN).
double LinearRegression::compute_cost() const {
   const size_t n = x_values.size();
   if (n == 0) {
       return 0.0;
   }
   double total_error = 0.0;
   for (size_t i = 0; i < n; ++i) {
       const double residual = predict(x_values[i]) - y_values[i];
       // residual * residual instead of std::pow(residual, 2): pow is a
       // general transcendental and far slower for integer exponents.
       total_error += residual * residual;
   }
   return total_error / n;
}
// Gradients of the MSE cost with respect to m (first) and b (second).
// Returns a zero gradient for an empty dataset (the original divided by
// zero, producing NaN and corrupting the parameters on the next step).
std::pair<double, double> LinearRegression::compute_gradients() const {
   const size_t n = x_values.size();
   if (n == 0) {
       return {0.0, 0.0};
   }
   double dm = 0.0, db = 0.0;
   for (size_t i = 0; i < n; ++i) {
       const double y_pred = predict(x_values[i]);
       // d/dm MSE = -2/n * sum(x_i * (y_i - y_pred_i)); likewise for b.
       dm += -2 * x_values[i] * (y_values[i] - y_pred);
       db += -2 * (y_values[i] - y_pred);
   }
   return { dm / n, db / n };
}
void LinearRegression::train(double learning_rate, int iterations) {
   for (int i = 0; i < iterations; ++i) {
       auto [dm, db] = compute_gradients();
       m -= learning_rate * dm;
       b -= learning_rate * db;
       
       if (i % 100 == 0) {
           std::cout << "Iteration " << i << ": Cost = " << compute_cost() << std::endl;
       }
   }
}

Finally, let’s use our LinearRegression class in the main program:

#include "linear_regression.h"
#include <iostream>
int main() {
   // Training data: small x/y pairs (targets are the first five primes,
   // so the relationship is only approximately linear).
   const std::vector<double> xs = {1, 2, 3, 4, 5};
   const std::vector<double> ys = {2, 3, 5, 7, 11};

   LinearRegression model(xs, ys);
   model.train(0.01, 1000);  // learning rate 0.01, 1000 gradient steps

   // Extrapolate one step past the training range.
   const double x_to_predict = 6;
   const double y_predicted = model.predict(x_to_predict);
   std::cout << "Predicted y for x = " << x_to_predict << " is " << y_predicted << std::endl;
   return 0;
}

In this example, we create a linear regression model to predict y values given x values. The train function updates the model parameters (m and b) to minimize the cost function, which measures the difference between the predicted and actual y values. After training the model, we use it to predict the y value for x = 6.