ACPR+ is an autonomous chess-playing robot and a continuation of last year's project, which is the reason for the '+'. As the name suggests, you will be playing chess against this genius bot.


Documentations
INVERSE KINEMATICS CODE
added 2 months, 2 weeks ago by Yogendra Kumar
#include <Servo.h>

Servo servo1, servo2, servo3;


double dist;
int s1 = 1, s2 = 2, s3 = 3;              // servo signal pins 1, 2 and 3 respectively
double *omega;                           // joint angular velocities returned by matMultiply()
double link1, link2, link3;              // link lengths: set these to match the actual arm
double velocity;                         // desired end-effector speed
double destination[3], current[3];       // Cartesian target and current positions (updated elsewhere)
volatile double theta1, theta2, theta3;  // current joint angles
double jacobian[3][3], invJacobian[3][3], velocityVector[3];


void setup() {
  // put your setup code here, to run once:
  Serial.begin(9600);

  servo1.attach(s1);
  servo2.attach(s2);
  servo3.attach(s3);
}



void loop() {
  // put your main code here, to run repeatedly:
  // Jacobian of the 3-DOF arm (base rotation theta1, shoulder theta2, elbow theta3)
  jacobian[0][0] = -(link2*cos(theta2)+link3*cos(theta3))*sin(theta1);
  jacobian[0][1] = -link2*sin(theta2)*cos(theta1);
  jacobian[0][2] = -link3*sin(theta3)*cos(theta1);
  jacobian[1][0] = (link2*cos(theta2)+link3*cos(theta3))*cos(theta1);
  jacobian[1][1] = -link2*sin(theta2)*sin(theta1);
  jacobian[1][2] = -link3*sin(theta3)*sin(theta1);
  jacobian[2][0] = 0;
  jacobian[2][1] = link2*cos(theta2);
  jacobian[2][2] = link3*cos(theta3);


  int i, j;

  // Copy the Jacobian so the original is preserved when the copy is inverted in place
  for (i = 0; i < 3; i++)
    for (j = 0; j < 3; j++)
      invJacobian[i][j] = jacobian[i][j];

  invMat(invJacobian);

  // Cartesian velocity vector pointing from the current position towards the destination
  dist = distance(current, destination);
  for (i = 0; i < 3; i++)
    velocityVector[i] = (destination[i] - current[i]) * velocity / dist;

  // Joint angular velocities: omega = J^-1 * v
  omega = matMultiply(invJacobian, velocityVector);


  // Directing servos!
  servo1.write(omega[0]);
  servo2.write(omega[1]);
  servo3.write(omega[2]);
  delay(20);
}








//Functions


void invMat(double a[][3]) {
  // Invert a 3x3 matrix in place via the adjugate / cofactor formula.
  double det = a[0][0]*(a[1][1]*a[2][2]-a[2][1]*a[1][2])
             - a[0][1]*(a[1][0]*a[2][2]-a[1][2]*a[2][0])
             + a[0][2]*(a[1][0]*a[2][1]-a[1][1]*a[2][0]);
  if (det != 0) {
    double inv[3][3];
    // The cyclic (i+1, i+2) index pattern already carries the cofactor sign,
    // and swapping i and j on the right-hand side applies the adjugate transpose.
    for (int i = 0; i < 3; i++)
      for (int j = 0; j < 3; j++)
        inv[i][j] = (a[(j+1)%3][(i+1)%3]*a[(j+2)%3][(i+2)%3]
                   - a[(j+1)%3][(i+2)%3]*a[(j+2)%3][(i+1)%3]) / det;
    // Copy back only after the whole inverse has been computed, so entries of a
    // are not overwritten while they are still needed.
    for (int i = 0; i < 3; i++)
      for (int j = 0; j < 3; j++)
        a[i][j] = inv[i][j];
  }
}

double* matMultiply(double b[][3], double c[]) {
  // Multiply a 3x3 matrix by a 3-vector. The result buffer is static so the
  // returned pointer remains valid after the function returns.
  static double d[3];
  int i, j;
  for (i = 0; i < 3; i++) {
    double sum = 0;
    for (j = 0; j < 3; j++) {
      sum = sum + b[i][j] * c[j];
    }
    d[i] = sum;
  }
  return d;
}

double distance(double p[], double q[]) {
  // Euclidean distance between two points in 3D space
  double sum = 0;
  int i;
  for (i = 0; i < 3; i++) {
    sum = sum + (p[i] - q[i]) * (p[i] - q[i]);
  }
  return sqrt(sum);
}
3D SIMULATION CODE
added 2 months, 2 weeks ago by Yogendra Kumar
float beta, theta,x,y, phi,z,Z, theta2, beta2, posX, posY,posZ;

void setup()
{
size(500,500, P3D);
}


void draw() {
  background(200);

  if (keyPressed) {
    keyPressed();
    theta2 = getTheta(mouseY, Z);
    beta2 = getBeta(mouseY, Z);
    z = Z/2 - 100*cos(theta2/2)*sin(beta2);
  }

  beta = getBeta(mouseX, mouseY);
  theta = getTheta(mouseX, mouseY);
  phi = getPhi(mouseX, mouseY, Z);
  // translate(100,100,0);
  x = (mouseX)/2 + 100*cos(phi)*cos(theta/2)*cos(beta);
  y = (mouseY)/2 + 100*cos(phi)*cos(theta/2)*sin(beta);

  posX = x;
  posY = -y;
  posZ = -z;

  rectMode(CENTER);
  fill(51);
  stroke(255);
  pushMatrix();
  translate(0, 25, 0);
  // rotateX(PI/8);
  box(10, 50, 10);
  popMatrix();
  // line(0,50,0,0,0,0);
  // line(0,-25,0,x,y,z);

  pushMatrix();
  // translate(100,75,0);
  rotateX(PI);
  rotateY(atan(-posZ/posX));
  rotateZ((PI/2) - atan(posY/sqrt((posX*posX)+(posY*posY)+(posZ*posZ))));
  translate((posX)/2, (posY)/2, posZ/2);
  rectMode(CENTER);
  fill(51);
  stroke(255);
  box(100, 10, 10);
  popMatrix();

  line(0, 0, 0, x, y, z);
  line(x, y, z, mouseX, mouseY, Z);

  // translate((x+100)/2,(y+75)/2,(z/2));
  // box(10,100,10);
  // line(x,y,z,posX,posY,posZ);
  // popMatrix();
}


float getBeta(float a, float b) {
  return (PI/2 + atan(b/a));
}

float getTheta(float a, float b) {
  float theta = 2*asin(sqrt(a*a + b*b)/200);
  return theta;
}

float getPhi(float a, float b, float c) {
  return atan(sqrt((b*b + c*c)/(a*a + b*b)));
}

void keyPressed() {
  // Number keys 0-9 set the target Z coordinate in steps of 10 (48 is the ASCII code of '0')
  Z = 10*(float(key) - 48);
  println(Z);
}
3S SIMULATION CODE
added 2 months, 2 weeks ago by Yogendra Kumar
float beta, theta,x,y, phi,z,Z, theta2, beta2;

void setup(){
size(500,500, P3D);
}


void draw() {
  background(200);

  if (keyPressed) {
    keyPressed();
    theta2 = getTheta(mouseY, Z);
    beta2 = getBeta(mouseY, Z);
    z = Z/2 - 100*cos(theta2/2)*sin(beta2);
  }

  beta = getBeta(mouseX, mouseY);
  theta = getTheta(mouseX, mouseY);
  phi = getPhi(mouseX, mouseY, Z);
  // translate(100,100,0);
  x = mouseX/2 + 100*cos(phi)*cos(theta/2)*cos(beta);
  y = mouseY/2 + 100*cos(phi)*cos(theta/2)*sin(beta);

  line(0, 50, 0, 0, 0, 0);
  line(0, 0, 0, x, y, z);
  line(x, y, z, mouseX, mouseY, Z);
}


float getBeta(float a, float b) {
  return atan(-a/b);
}

float getTheta(float a, float b) {
  float theta = 2*asin(sqrt(a*a + b*b)/200);
  return theta;
}

float getPhi(float a, float b, float c) {
  return atan(sqrt((b*b + c*c)/(a*a + b*b)));
}

void keyPressed() {
  Z = 10*(float(key) - 48);
  println(Z);
}
2D simulation code
added 2 months, 2 weeks ago by Yogendra Kumar
float w, beta, theta,x,y,z;

void setup(){
size(500,500, P3D);
}


void draw() {
  w = dist(100, 400, mouseX, mouseY);
  if (w > 200) {
    textSize(32);
    text("No solution", 200, 50);
  }
  else {
    background(200);

    beta = getBeta(mouseX-100, mouseY-400);
    theta = getTheta(mouseX-100, mouseY-400);

    x = (mouseX+100)/2 - 100*cos(theta/2)*cos(beta);
    y = (mouseY+400)/2 - 100*cos(theta/2)*sin(beta);

    line(100, 450, 100, 400);
    line(100, 400, x, y);
    line(x, y, mouseX, mouseY);
    line(mouseX, mouseY, mouseX, mouseY+30);
  }
}


float getBeta(float a, float b) {
  return PI/2 + atan(b/a);
}

float getTheta(float a, float b) {
  float theta = 2*asin(sqrt(a*a + b*b)/200);
  return theta;
}

void keyPressed() {
  z = 10*float(key);
}
Use of a computer vision algorithm in OpenCV to detect the pieces on the chess board.
added 2 months, 2 weeks ago by Pranay Shah
This is the trivial, or basic, version of our algorithm, which aims to detect the move of either a black or a white piece on the chessboard.

-> First, an image of the original configuration of the pieces on the chess board is stored in memory.
-> The game is then streamed as live video; after a player has made his move, he presses a particular key and an image is captured.
-> This image and the already stored image are both converted to grayscale, with pixel values between 0 and 255.
-> The grayscale images are then converted into their respective matrices.
-> Each matrix is split into sixty-four submatrices so that we get the information for each individual square and can compare the squares one by one.
-> On comparison, wherever a change between the corresponding submatrices is observed, the piece movement is traced (see the sketch below).
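
A minimal sketch of this per-square difference check is given below, assuming Python with OpenCV and NumPy. The board is taken to fill the frame, and the 8x8 split and the difference threshold of 25 are illustrative placeholders, not the project's actual values.

import cv2
import numpy as np

def split_into_squares(gray):
    # Split a grayscale board image into an 8x8 grid of submatrices, one per square.
    h, w = gray.shape
    sq_h, sq_w = h // 8, w // 8
    return [[gray[r*sq_h:(r+1)*sq_h, c*sq_w:(c+1)*sq_w] for c in range(8)]
            for r in range(8)]

def detect_changed_squares(before_bgr, after_bgr, diff_threshold=25.0):
    # Convert both images to grayscale (values 0 to 255), split them into 64 squares,
    # and report the squares whose mean absolute difference exceeds the threshold.
    before = split_into_squares(cv2.cvtColor(before_bgr, cv2.COLOR_BGR2GRAY))
    after = split_into_squares(cv2.cvtColor(after_bgr, cv2.COLOR_BGR2GRAY))
    changed = []
    for r in range(8):
        for c in range(8):
            diff = cv2.absdiff(before[r][c], after[r][c])
            if float(np.mean(diff)) > diff_threshold:
                changed.append((r, c))
    return changed

For an ordinary move the changed list contains just the source and destination squares; special moves such as castling change more squares and have to be resolved by the chess logic.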
Use of computer vision to detect pieces in OpenCV.
added 2 months, 3 weeks ago by Pranay Shah
IP, or image processing, is done mainly to detect the pieces on each particular square of the chess board. The algorithm we are going to follow is given below.

1) Capture the image - In this part, a live video stream of the game will be going on, and as soon as the player is done with his move he has to press a key (any key, which has a specific ASCII value). The image at that instant (the instant when the player presses the key) is captured and stored in the memory of the processor, and further image processing is then carried out on it. For this we are going to use a Logitech C270 camera (480*640 resolution), fixed at the top centre of the chess board using a stand whose distance will be set on the basis of the dimensions of the chess board.
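
A rough sketch of this capture step, assuming Python with OpenCV; the camera index, window name and the choice of 'c' as the capture key are placeholders, not the project's actual settings.

import cv2

cap = cv2.VideoCapture(0)              # Logitech C270 mounted above the board
captured = None
while True:
    ok, frame = cap.read()             # live video stream of the game
    if not ok:
        break
    cv2.imshow("board", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('c'):                # player signals that his move is done
        captured = frame.copy()        # this frame goes on to the processing steps below
    elif key == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()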

2) Corner Detection - The next step in the algorithm is corner detection, which is done using the Shi-Tomasi corner detection algorithm (http://docs.opencv.org/doc/tutorials/features2d/trackingmotion/good_features_to_track/good_features_to_track.html). It detects the corners of all the individual squares, i.e. the intersection points of the grid. The main 4 corners of the board are then identified using pixel calculations.
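
A sketch of the Shi-Tomasi step under the same Python/OpenCV assumption; maxCorners, qualityLevel and minDistance are illustrative values, and the "main 4 corners" are picked here simply as the detected points closest to the image corners, which is only one possible pixel calculation.

import cv2
import numpy as np

def find_board_corners(frame):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Shi-Tomasi "good features to track": the intersection points of the squares.
    pts = cv2.goodFeaturesToTrack(gray, maxCorners=120, qualityLevel=0.01, minDistance=20)
    pts = pts.reshape(-1, 2)           # assumes at least one corner was found
    h, w = gray.shape
    # Take the detected point nearest to each image corner as an outer board corner.
    image_corners = np.array([[0, 0], [w, 0], [0, h], [w, h]], dtype=np.float32)
    board_corners = []
    for ic in image_corners:
        d = np.linalg.norm(pts - ic, axis=1)
        board_corners.append(pts[int(np.argmin(d))])
    return np.array(board_corners)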

3) Edge Detection - The next step in the algorithm is edge detection, which is done using the Canny edge detection technique to separate all the rows and columns of the chess board and isolate the individual squares.
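
A minimal Canny pass, again in Python/OpenCV; the blur kernel and the 50/150 thresholds are illustrative, since the project's actual values are not stated.

import cv2

def board_edges(frame):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)   # suppress noise before edge detection
    edges = cv2.Canny(blurred, 50, 150)           # binary image of the row/column grid
    return edges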

4) Dilation operation - The dilation operation is then applied to the edge-detected binary image within each chess square, and the number of white pixels is compared with a designated threshold value. If it exceeds the threshold value, the chess square is occupied; otherwise it is empty.
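
A sketch of this occupancy test, reusing the edge image from the previous step; the 3x3 kernel and the white-pixel threshold of 150 are placeholders.

import cv2
import numpy as np

def occupancy_map(edges, white_threshold=150):
    # Dilate the edge image, then count white pixels square by square.
    dilated = cv2.dilate(edges, np.ones((3, 3), np.uint8), iterations=1)
    h, w = dilated.shape
    sq_h, sq_w = h // 8, w // 8
    return [[cv2.countNonZero(dilated[r*sq_h:(r+1)*sq_h, c*sq_w:(c+1)*sq_w]) > white_threshold
             for c in range(8)] for r in range(8)]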

5) Erosion operation - The image is thresholded. After thresholding, the black pieces stay black whereas everything else becomes white. The erosion operation is then applied to the thresholded image within each chess square, and the number of black pixels is compared with a designated threshold value. If the value exceeds this threshold, the piece on that square is black; otherwise it is white (assuming the square is not empty).
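
A sketch of the colour test under the same assumptions; the intensity threshold of 80 and the black-pixel threshold of 100 are illustrative.

import cv2
import numpy as np

def colour_map(frame, occupied, black_threshold=100):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Threshold so that the dark (black) pieces stay black and everything else goes white,
    # then apply the erosion operation before counting black pixels per square.
    _, thresh = cv2.threshold(gray, 80, 255, cv2.THRESH_BINARY)
    eroded = cv2.erode(thresh, np.ones((3, 3), np.uint8), iterations=1)
    h, w = eroded.shape
    sq_h, sq_w = h // 8, w // 8
    colours = [[None] * 8 for _ in range(8)]
    for r in range(8):
        for c in range(8):
            if occupied[r][c]:
                square = eroded[r*sq_h:(r+1)*sq_h, c*sq_w:(c+1)*sq_w]
                black_pixels = square.size - cv2.countNonZero(square)
                colours[r][c] = 'black' if black_pixels > black_threshold else 'white'
    return colours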

6) Chess Move Detection - The occupancy and colour information obtained above is computed for the images captured before and after the player's turn. Comparing the two square by square, as in the trivial version of the algorithm described above, reveals which square was vacated and which square was newly occupied, i.e. the source and destination of the move.
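
A sketch of how the before/after occupancy maps might be combined into a move, based on the comparison idea in the first documentation entry; the square naming (row 0 taken as rank 8) is an assumption that depends on the camera orientation, and captures would additionally need the colour map.

def detect_move(occ_before, occ_after):
    # Source: a square that was occupied before the turn and is empty afterwards.
    # Destination: a square that was empty before and is occupied afterwards
    # (for captures the destination stays occupied, so the colour map is needed there).
    source = destination = None
    for r in range(8):
        for c in range(8):
            if occ_before[r][c] and not occ_after[r][c]:
                source = (r, c)
            elif not occ_before[r][c] and occ_after[r][c]:
                destination = (r, c)
    return source, destination

def to_algebraic(square):
    # (row, col) -> e.g. "e2", taking row 0 as rank 8 (camera-dependent assumption).
    r, c = square
    return "abcdefgh"[c] + str(8 - r)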

Once the move has been recognised, it is fed as input to the chess code on the Raspberry Pi, which is in turn linked to the servo motors of the robotic arm.
